/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
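/* Note: both helpers above delegate to the secure_seq infrastructure, which
 * mixes the connection 4-tuple with per-boot secret keys so that initial
 * sequence numbers and per-destination timestamp offsets are unpredictable
 * to off-path attackers (in the spirit of RFC 6528).
 */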

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
			if (tp->write_seq == 0)
				tp->write_seq = 1;
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
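/* Note: the "tw_snd_nxt + 65535 + 2" above advances the new connection's
 * initial write_seq past anything the old incarnation could still have in
 * flight (a full 64K window plus the FIN), so even without PAWS an old
 * duplicate segment cannot be confused with new data.
 */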

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However, we set the state to SYN-SENT and, without releasing the
	 * socket lock, select a source port, enter ourselves into the hash
	 * tables and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket.  */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
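/* Note: inet_hash_connect() is where an ephemeral source port is chosen (if
 * the socket is not already bound to one) and where the socket is inserted
 * into the established hash, which is why the state is switched to SYN-SENT
 * before it runs. With TCP_FASTOPEN_CONNECT, tcp_fastopen_defer_connect()
 * returns true and the SYN is only sent later, from the first sendmsg()
 * carrying data.
 */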

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to go wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment,
 * the header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket, the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_rtx_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

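		/* Note: this is the RTO-revert path from draft-zimmermann-tcp-lcd:
		 * one backoff step is undone, the RTO is recomputed with the
		 * reduced backoff, and the retransmission timer is re-armed with
		 * whatever is left of that RTO measured from the send time of the
		 * oldest unacked skb. If nothing is left, we retransmit right away.
		 */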
		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);


		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted, it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider only PROTO_UNREACH and
	 * PORT_UNREACH as hard errors (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in each dark corner, sending random
	 * errors ordered by their masters, even these two messages have
	 * finally lost their original sense (even Linux sends invalid
	 * PORT_UNREACHs).
	 *
	 * Now we are in compliance with the RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else	{ /* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
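/* Note: this sets up checksum offload rather than a complete checksum:
 * th->check holds only the pseudo-header sum, and csum_start/csum_offset
 * tell the device (or the software fallback) where to fold in the checksum
 * over the TCP header and payload.
 */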

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused an RST, it is not for a socket
 *		existing in our system; if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other side's
 *		TCP. So we build the reply based only on the parameters
 *		that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We do not lose security here:
		 * the incoming packet is checked with the md5 hash of the
		 * found key, and no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost and
	 * routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
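/* Note on the RST above: per RFC 793, if the triggering segment carried an
 * ACK, the reset is sent with SEQ = SEG.ACK and no ACK bit; otherwise it is
 * sent with SEQ = 0, the ACK bit set and ACK = SEG.SEQ + SEG.LEN (counting
 * SYN and FIN), which is exactly what the th->ack branch in the function
 * computes.
 */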
793
794/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states
795 outside socket context is ugly, certainly. What can I do?
796 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
	if (sk)
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
					     tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
					   const union tcp_md5_addr *addr,
					   int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);
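/* Note: the lookup above is a longest-prefix match: every configured key
 * whose address/prefixlen covers the peer address is a candidate, and the
 * most specific one wins. tcp_md5_do_lookup_exact() below, by contrast, is
 * used when adding or deleting keys and requires address and prefixlen to
 * match exactly.
 */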

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
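/* Note: keys live on an RCU hlist hanging off tp->md5sig_info, so readers in
 * the packet path only need rcu_read_lock(). The sk_nocaps_add(sk,
 * NETIF_F_GSO_MASK) call when the first key is installed disables GSO on the
 * socket, because the MD5 option has to be computed separately for each
 * segment that is actually emitted.
 */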

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

Ivan Delalande8917a772017-06-15 18:07:07 -07001141static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1142 char __user *optval, int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001143{
1144 struct tcp_md5sig cmd;
1145 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
Ivan Delalande8917a772017-06-15 18:07:07 -07001146 u8 prefixlen = 32;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001147
1148 if (optlen < sizeof(cmd))
1149 return -EINVAL;
1150
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02001151 if (copy_from_user(&cmd, optval, sizeof(cmd)))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001152 return -EFAULT;
1153
1154 if (sin->sin_family != AF_INET)
1155 return -EINVAL;
1156
Ivan Delalande8917a772017-06-15 18:07:07 -07001157 if (optname == TCP_MD5SIG_EXT &&
1158 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1159 prefixlen = cmd.tcpm_prefixlen;
1160 if (prefixlen > 32)
1161 return -EINVAL;
1162 }
1163
Dmitry Popov64a124e2014-08-03 22:45:19 +04001164 if (!cmd.tcpm_keylen)
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001165 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001166 AF_INET, prefixlen);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001167
1168 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1169 return -EINVAL;
1170
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001171 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001172 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001173 GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001174}
1175
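/* Illustrative userspace sketch (untested) of how a key reaches
 * tcp_v4_parse_md5_keys() above; it assumes <sys/socket.h>, <netinet/tcp.h>,
 * <netinet/in.h>, <arpa/inet.h> and <string.h>, and the peer address and key
 * below are placeholders:
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	md5.tcpm_keylen = strlen("example-key");
 *	memcpy(md5.tcpm_key, "example-key", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key (the tcp_md5_do_del() branch above);
 * TCP_MD5SIG_EXT with TCP_MD5SIG_FLAG_PREFIX additionally honours
 * tcpm_prefixlen.
 */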
Eric Dumazet19689e32016-06-27 18:51:53 +02001176static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1177 __be32 daddr, __be32 saddr,
1178 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001179{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001180 struct tcp4_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -07001181 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +02001182 struct tcphdr *_th;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001183
Eric Dumazet19689e32016-06-27 18:51:53 +02001184 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001185 bp->saddr = saddr;
1186 bp->daddr = daddr;
1187 bp->pad = 0;
YOSHIFUJI Hideaki076fb722008-04-17 12:48:12 +09001188 bp->protocol = IPPROTO_TCP;
Adam Langley49a72df2008-07-19 00:01:42 -07001189 bp->len = cpu_to_be16(nbytes);
David S. Millerc7da57a2007-10-26 00:41:21 -07001190
Eric Dumazet19689e32016-06-27 18:51:53 +02001191 _th = (struct tcphdr *)(bp + 1);
1192 memcpy(_th, th, sizeof(*th));
1193 _th->check = 0;
1194
1195 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1196 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1197 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +08001198 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -07001199}
1200
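/* Per RFC 2385 the digest covers, in order: the TCP pseudo-header built above
 * (saddr, daddr, zero pad, protocol, segment length), the base TCP header
 * (no options) with its checksum field zeroed, the segment payload (when
 * hashing a full skb), and finally the key itself.  The two callers below
 * feed those pieces to the ahash request in that order.
 */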
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001201static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001202 __be32 daddr, __be32 saddr, const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -07001203{
1204 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001205 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -07001206
1207 hp = tcp_get_md5sig_pool();
1208 if (!hp)
1209 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001210 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001211
Herbert Xucf80e0e2016-01-24 21:20:23 +08001212 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001213 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +02001214 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -07001215 goto clear_hash;
1216 if (tcp_md5_hash_key(hp, key))
1217 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001218 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1219 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001220 goto clear_hash;
1221
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001222 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001223 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -07001224
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001225clear_hash:
1226 tcp_put_md5sig_pool();
1227clear_hash_noput:
1228 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -07001229 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001230}
1231
Eric Dumazet39f8e582015-03-24 15:58:55 -07001232int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1233 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001234 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001235{
Adam Langley49a72df2008-07-19 00:01:42 -07001236 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001237 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001238 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001239 __be32 saddr, daddr;
1240
Eric Dumazet39f8e582015-03-24 15:58:55 -07001241 if (sk) { /* valid for establish/request sockets */
1242 saddr = sk->sk_rcv_saddr;
1243 daddr = sk->sk_daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001244 } else {
Adam Langley49a72df2008-07-19 00:01:42 -07001245 const struct iphdr *iph = ip_hdr(skb);
1246 saddr = iph->saddr;
1247 daddr = iph->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001248 }
Adam Langley49a72df2008-07-19 00:01:42 -07001249
1250 hp = tcp_get_md5sig_pool();
1251 if (!hp)
1252 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001253 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001254
Herbert Xucf80e0e2016-01-24 21:20:23 +08001255 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001256 goto clear_hash;
1257
Eric Dumazet19689e32016-06-27 18:51:53 +02001258 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -07001259 goto clear_hash;
1260 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1261 goto clear_hash;
1262 if (tcp_md5_hash_key(hp, key))
1263 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001264 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1265 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001266 goto clear_hash;
1267
1268 tcp_put_md5sig_pool();
1269 return 0;
1270
1271clear_hash:
1272 tcp_put_md5sig_pool();
1273clear_hash_noput:
1274 memset(md5_hash, 0, 16);
1275 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001276}
Adam Langley49a72df2008-07-19 00:01:42 -07001277EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001278
Eric Dumazetba8e2752015-10-02 11:43:28 -07001279#endif
1280
Eric Dumazetff74e232015-03-24 15:58:54 -07001281/* Called with rcu_read_lock() */
Eric Dumazetba8e2752015-10-02 11:43:28 -07001282static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
Eric Dumazetff74e232015-03-24 15:58:54 -07001283 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001284{
Eric Dumazetba8e2752015-10-02 11:43:28 -07001285#ifdef CONFIG_TCP_MD5SIG
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001286 /*
1287 * This gets called for each TCP segment that arrives
1288 * so we want to be efficient.
1289 * We have 3 drop cases:
1290 * o No MD5 hash and one expected.
1291 * o MD5 hash and we're not expecting one.
 1292	 * o MD5 hash and it's wrong.
1293 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001294 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001295 struct tcp_md5sig_key *hash_expected;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001296 const struct iphdr *iph = ip_hdr(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001297 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001298 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001299 unsigned char newhash[16];
1300
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001301 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1302 AF_INET);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +09001303 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001304
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001305 /* We've parsed the options - do we have a hash? */
1306 if (!hash_expected && !hash_location)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001307 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001308
1309 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001310 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001311 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001312 }
1313
1314 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001315 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001316 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001317 }
1318
 1319	/* Okay, so we have both hash_expected and hash_location -
 1320	 * so we need to calculate the MD5 hash.
1321 */
Adam Langley49a72df2008-07-19 00:01:42 -07001322 genhash = tcp_v4_md5_hash_skb(newhash,
1323 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -07001324 NULL, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001325
1326 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -07001327 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
Joe Perchese87cc472012-05-13 21:56:26 +00001328 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1329 &iph->saddr, ntohs(th->source),
1330 &iph->daddr, ntohs(th->dest),
1331 genhash ? " tcp_v4_calc_md5_hash failed"
1332 : "");
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001333 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001334 }
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001335 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001336#endif
Eric Dumazetba8e2752015-10-02 11:43:28 -07001337 return false;
1338}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001339
Eric Dumazetb40cf182015-09-25 07:39:08 -07001340static void tcp_v4_init_req(struct request_sock *req,
1341 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001342 struct sk_buff *skb)
1343{
1344 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001345 struct net *net = sock_net(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +03001346
Eric Dumazet08d2cc3b2015-03-18 14:05:38 -07001347 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1348 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001349 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
Octavian Purdila16bea702014-06-25 17:09:53 +03001350}
1351
Eric Dumazetf9646292015-09-29 07:42:50 -07001352static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1353 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001354 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +03001355{
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001356 return inet_csk_route_req(sk, &fl->u.ip4, req);
Octavian Purdilad94e0412014-06-25 17:09:55 +03001357}
1358
Eric Dumazet72a3eff2006-11-16 02:30:37 -08001359struct request_sock_ops tcp_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 .family = PF_INET,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001361 .obj_size = sizeof(struct tcp_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +03001362 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001363 .send_ack = tcp_v4_reqsk_send_ack,
1364 .destructor = tcp_v4_reqsk_destructor,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 .send_reset = tcp_v4_send_reset,
stephen hemminger688d1942014-08-29 23:32:05 -07001366 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367};
1368
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001369static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001370 .mss_clamp = TCP_MSS_DEFAULT,
Octavian Purdila16bea702014-06-25 17:09:53 +03001371#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001372 .req_md5_lookup = tcp_v4_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +00001373 .calc_md5_hash = tcp_v4_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -08001374#endif
Octavian Purdila16bea702014-06-25 17:09:53 +03001375 .init_req = tcp_v4_init_req,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001376#ifdef CONFIG_SYN_COOKIES
1377 .cookie_init_seq = cookie_v4_init_sequence,
1378#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +03001379 .route_req = tcp_v4_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -07001380 .init_seq = tcp_v4_init_seq,
1381 .init_ts_off = tcp_v4_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001382 .send_synack = tcp_v4_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +03001383};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001384
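/* tcp_v4_conn_request() below is little more than a sanity check plus a call
 * into the address-family independent tcp_conn_request(), which drives SYN
 * handling through the two ops tables above: tcp_request_sock_ops for the
 * generic request-sock work (SYNACK retransmit, reset, destructor) and
 * tcp_request_sock_ipv4_ops for the IPv4-specific steps (init_req, route_req,
 * init_seq, send_synack, plus the optional syncookie/MD5 hooks).
 */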
Linus Torvalds1da177e2005-04-16 15:20:36 -07001385int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1386{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387	/* Never answer to SYNs sent to broadcast or multicast */
Eric Dumazet511c3f92009-06-02 05:14:27 +00001388 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 goto drop;
1390
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001391 return tcp_conn_request(&tcp_request_sock_ops,
1392 &tcp_request_sock_ipv4_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001395 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001396 return 0;
1397}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001398EXPORT_SYMBOL(tcp_v4_conn_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
1400
1401/*
 1402 * The three-way handshake has completed - we received a valid final ACK -
1403 * now create the new socket.
1404 */
Eric Dumazet0c271712015-09-29 07:42:48 -07001405struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001406 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001407 struct dst_entry *dst,
1408 struct request_sock *req_unhash,
1409 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001411 struct inet_request_sock *ireq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 struct inet_sock *newinet;
1413 struct tcp_sock *newtp;
1414 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001415#ifdef CONFIG_TCP_MD5SIG
1416 struct tcp_md5sig_key *key;
1417#endif
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001418 struct ip_options_rcu *inet_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419
1420 if (sk_acceptq_is_full(sk))
1421 goto exit_overflow;
1422
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 newsk = tcp_create_openreq_child(sk, req, skb);
1424 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001425 goto exit_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426
Herbert Xubcd76112006-06-30 13:36:35 -07001427 newsk->sk_gso_type = SKB_GSO_TCPV4;
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001428 inet_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429
1430 newtp = tcp_sk(newsk);
1431 newinet = inet_sk(newsk);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001432 ireq = inet_rsk(req);
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001433 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1434 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
David Ahern6dd9a142015-12-16 13:20:44 -08001435 newsk->sk_bound_dev_if = ireq->ir_iif;
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001436 newinet->inet_saddr = ireq->ir_loc_addr;
1437 inet_opt = rcu_dereference(ireq->ireq_opt);
1438 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001439 newinet->mc_index = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001440 newinet->mc_ttl = ip_hdr(skb)->ttl;
Jiri Benc4c507d22012-02-09 09:35:49 +00001441 newinet->rcv_tos = ip_hdr(skb)->tos;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001442 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001443 if (inet_opt)
1444 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001445 newinet->inet_id = newtp->write_seq ^ jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
Eric Dumazetdfd25ff2012-03-10 09:20:21 +00001447 if (!dst) {
1448 dst = inet_csk_route_child_sock(sk, newsk, req);
1449 if (!dst)
1450 goto put_and_exit;
1451 } else {
1452 /* syncookie case : see end of cookie_v4_check() */
1453 }
David S. Miller0e734412011-05-08 15:28:03 -07001454 sk_setup_caps(newsk, dst);
1455
Daniel Borkmann81164412015-01-05 23:57:48 +01001456 tcp_ca_openreq_child(newsk, dst);
1457
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001459 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07001460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 tcp_initialize_rcv_mss(newsk);
1462
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001463#ifdef CONFIG_TCP_MD5SIG
1464 /* Copy over the MD5 key from the original socket */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001465 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1466 AF_INET);
Ian Morris00db4122015-04-03 09:17:27 +01001467 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001468 /*
1469 * We're using one, so create a matching key
1470 * on the newsk structure. If we fail to get
1471 * memory, then we end up not copying the key
1472 * across. Shucks.
1473 */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001474 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001475 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
Eric Dumazeta4654192010-05-16 00:36:33 -07001476 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001477 }
1478#endif
1479
David S. Miller0e734412011-05-08 15:28:03 -07001480 if (__inet_inherit_port(sk, newsk) < 0)
1481 goto put_and_exit;
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001482 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001483 if (likely(*own_req)) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001484 tcp_move_syn(newtp, req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001485 ireq->ireq_opt = NULL;
1486 } else {
1487 newinet->inet_opt = NULL;
1488 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 return newsk;
1490
1491exit_overflow:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001492 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001493exit_nonewsk:
1494 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495exit:
Eric Dumazet9caad862016-04-01 08:52:20 -07001496 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 return NULL;
David S. Miller0e734412011-05-08 15:28:03 -07001498put_and_exit:
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001499 newinet->inet_opt = NULL;
Christoph Paasche337e242012-12-14 04:07:58 +00001500 inet_csk_prepare_forced_close(newsk);
1501 tcp_done(newsk);
David S. Miller0e734412011-05-08 15:28:03 -07001502 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001504EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505
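/* With CONFIG_SYN_COOKIES, a bare ACK reaching a listener may encode a valid
 * cookie in its sequence/timestamp bits even though the request sock was
 * never kept; cookie_v4_check() then tries to rebuild the request and return
 * a freshly created child socket, otherwise the listener is handed back
 * unchanged.
 */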
Eric Dumazet079096f2015-10-02 11:43:32 -07001506static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001509 const struct tcphdr *th = tcp_hdr(skb);
1510
Florian Westphalaf9b4732010-06-03 00:43:44 +00001511 if (!th->syn)
Cong Wang461b74c2014-10-15 14:33:22 -07001512 sk = cookie_v4_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513#endif
1514 return sk;
1515}
1516
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001518 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 *
1520 * We have a potential double-lock case here, so even when
1521 * doing backlog processing we use the BH locking scheme.
1522 * This is because we cannot sleep with the original spinlock
1523 * held.
1524 */
1525int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1526{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001527 struct sock *rsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001528
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet404e0a82012-07-29 23:20:37 +00001530 struct dst_entry *dst = sk->sk_rx_dst;
1531
Tom Herbertbdeab992011-08-14 19:45:55 +00001532 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001533 sk_mark_napi_id(sk, skb);
Eric Dumazet404e0a82012-07-29 23:20:37 +00001534 if (dst) {
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001535 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
Ian Morris51456b22015-04-03 09:17:26 +01001536 !dst->ops->check(dst, 0)) {
David S. Miller92101b32012-07-23 16:29:00 -07001537 dst_release(dst);
1538 sk->sk_rx_dst = NULL;
1539 }
1540 }
Yafang Shao3d97d882018-05-29 23:27:31 +08001541 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001542 return 0;
1543 }
1544
Eric Dumazet12e25e12015-06-03 23:49:21 -07001545 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 goto csum_err;
1547
1548 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001549 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 if (!nsk)
1552 goto discard;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 if (nsk != sk) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001554 if (tcp_child_process(sk, nsk, skb)) {
1555 rsk = nsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 return 0;
1559 }
Eric Dumazetca551582010-06-03 09:03:58 +00001560 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001561 sock_rps_save_rxhash(sk, skb);
Eric Dumazetca551582010-06-03 09:03:58 +00001562
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001563 if (tcp_rcv_state_process(sk, skb)) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001564 rsk = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001566 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 return 0;
1568
1569reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001570 tcp_v4_send_reset(rsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571discard:
1572 kfree_skb(skb);
1573 /* Be careful here. If this function gets more complicated and
1574 * gcc suffers from register pressure on the x86, sk (in %ebx)
1575 * might be destroyed here. This current version compiles correctly,
1576 * but you have been warned.
1577 */
1578 return 0;
1579
1580csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001581 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1582 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 goto discard;
1584}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001585EXPORT_SYMBOL(tcp_v4_do_rcv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
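/* Early demux runs from the IP receive path before a route has been attached.
 * If an established socket already matches the 4-tuple we attach it (and its
 * cached rx dst, when still valid) to the skb, letting the later full socket
 * lookup and route lookup be skipped.
 */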
Paolo Abeni74874492017-09-28 15:51:36 +02001587int tcp_v4_early_demux(struct sk_buff *skb)
David S. Miller41063e92012-06-19 21:22:05 -07001588{
David S. Miller41063e92012-06-19 21:22:05 -07001589 const struct iphdr *iph;
1590 const struct tcphdr *th;
1591 struct sock *sk;
David S. Miller41063e92012-06-19 21:22:05 -07001592
David S. Miller41063e92012-06-19 21:22:05 -07001593 if (skb->pkt_type != PACKET_HOST)
Paolo Abeni74874492017-09-28 15:51:36 +02001594 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001595
Eric Dumazet45f00f92012-10-22 21:42:47 +00001596 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
Paolo Abeni74874492017-09-28 15:51:36 +02001597 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001598
1599 iph = ip_hdr(skb);
Eric Dumazet45f00f92012-10-22 21:42:47 +00001600 th = tcp_hdr(skb);
David S. Miller41063e92012-06-19 21:22:05 -07001601
1602 if (th->doff < sizeof(struct tcphdr) / 4)
Paolo Abeni74874492017-09-28 15:51:36 +02001603 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001604
Eric Dumazet45f00f92012-10-22 21:42:47 +00001605 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
David S. Miller41063e92012-06-19 21:22:05 -07001606 iph->saddr, th->source,
Vijay Subramanian7011d082012-06-23 17:38:10 +00001607 iph->daddr, ntohs(th->dest),
David Ahern3fa6f612017-08-07 08:44:17 -07001608 skb->skb_iif, inet_sdif(skb));
David S. Miller41063e92012-06-19 21:22:05 -07001609 if (sk) {
1610 skb->sk = sk;
1611 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001612 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001613 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001614
David S. Miller41063e92012-06-19 21:22:05 -07001615 if (dst)
1616 dst = dst_check(dst, 0);
David S. Miller92101b32012-07-23 16:29:00 -07001617 if (dst &&
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001618 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
David S. Miller92101b32012-07-23 16:29:00 -07001619 skb_dst_set_noref(skb, dst);
David S. Miller41063e92012-06-19 21:22:05 -07001620 }
1621 }
Paolo Abeni74874492017-09-28 15:51:36 +02001622 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001623}
1624
Eric Dumazetc9c33212016-08-27 07:37:54 -07001625bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1626{
1627 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001628 struct skb_shared_info *shinfo;
1629 const struct tcphdr *th;
1630 struct tcphdr *thtail;
1631 struct sk_buff *tail;
1632 unsigned int hdrlen;
1633 bool fragstolen;
1634 u32 gso_segs;
1635 int delta;
Eric Dumazetc9c33212016-08-27 07:37:54 -07001636
1637 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1638 * we can fix skb->truesize to its real value to avoid future drops.
1639 * This is valid because skb is not yet charged to the socket.
 1640	 * It has been noticed that pure SACK packets were sometimes dropped
1641 * (if cooked by drivers without copybreak feature).
1642 */
Eric Dumazet60b1af32017-01-24 14:57:36 -08001643 skb_condense(skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001644
Eric Dumazetade96282018-11-19 17:45:55 -08001645 skb_dst_drop(skb);
1646
Eric Dumazet4f693b52018-11-27 14:42:03 -08001647 if (unlikely(tcp_checksum_complete(skb))) {
1648 bh_unlock_sock(sk);
1649 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1650 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1651 return true;
1652 }
1653
1654 /* Attempt coalescing to last skb in backlog, even if we are
1655 * above the limits.
1656 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1657 */
1658 th = (const struct tcphdr *)skb->data;
1659 hdrlen = th->doff * 4;
1660 shinfo = skb_shinfo(skb);
1661
1662 if (!shinfo->gso_size)
1663 shinfo->gso_size = skb->len - hdrlen;
1664
1665 if (!shinfo->gso_segs)
1666 shinfo->gso_segs = 1;
1667
1668 tail = sk->sk_backlog.tail;
1669 if (!tail)
1670 goto no_coalesce;
1671 thtail = (struct tcphdr *)tail->data;
1672
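	/* Coalescing with the backlog tail is only attempted when the new
	 * segment directly continues it (tail end_seq == skb seq), the
	 * DSCP/ECN byte matches, flags are compatible (no SYN/RST/URG, ACK
	 * set on both, same ECE/CWR), TLS decryption state matches, and the
	 * data offset and TCP options are identical; anything else falls
	 * through to no_coalesce.
	 */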
1673 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1674 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1675 ((TCP_SKB_CB(tail)->tcp_flags |
Eric Dumazetca2fe292019-04-26 10:10:05 -07001676 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1677 !((TCP_SKB_CB(tail)->tcp_flags &
1678 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
Eric Dumazet4f693b52018-11-27 14:42:03 -08001679 ((TCP_SKB_CB(tail)->tcp_flags ^
1680 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1681#ifdef CONFIG_TLS_DEVICE
1682 tail->decrypted != skb->decrypted ||
1683#endif
1684 thtail->doff != th->doff ||
1685 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1686 goto no_coalesce;
1687
1688 __skb_pull(skb, hdrlen);
1689 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1690 thtail->window = th->window;
1691
1692 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1693
1694 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1695 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1696
Eric Dumazetca2fe292019-04-26 10:10:05 -07001697 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1698 * thtail->fin, so that the fast path in tcp_rcv_established()
1699 * is not entered if we append a packet with a FIN.
1700 * SYN, RST, URG are not present.
1701 * ACK is set on both packets.
1702 * PSH : we do not really care in TCP stack,
1703 * at least for 'GRO' packets.
1704 */
1705 thtail->fin |= th->fin;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001706 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1707
1708 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1709 TCP_SKB_CB(tail)->has_rxtstamp = true;
1710 tail->tstamp = skb->tstamp;
1711 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1712 }
1713
1714 /* Not as strict as GRO. We only need to carry mss max value */
1715 skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1716 skb_shinfo(tail)->gso_size);
1717
1718 gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1719 skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1720
1721 sk->sk_backlog.len += delta;
1722 __NET_INC_STATS(sock_net(sk),
1723 LINUX_MIB_TCPBACKLOGCOALESCE);
1724 kfree_skb_partial(skb, fragstolen);
1725 return false;
1726 }
1727 __skb_push(skb, hdrlen);
1728
1729no_coalesce:
1730 /* Only socket owner can try to collapse/prune rx queues
1731 * to reduce memory overhead, so add a little headroom here.
1732 * Few sockets backlog are possibly concurrently non empty.
1733 */
1734 limit += 64*1024;
1735
Eric Dumazetc9c33212016-08-27 07:37:54 -07001736 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1737 bh_unlock_sock(sk);
1738 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1739 return true;
1740 }
1741 return false;
1742}
1743EXPORT_SYMBOL(tcp_add_backlog);
1744
Eric Dumazetac6e7802016-11-10 13:12:35 -08001745int tcp_filter(struct sock *sk, struct sk_buff *skb)
1746{
1747 struct tcphdr *th = (struct tcphdr *)skb->data;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001748
Christoph Paaschf2feaef2019-03-11 11:41:05 -07001749 return sk_filter_trim_cap(sk, skb, th->doff * 4);
Eric Dumazetac6e7802016-11-10 13:12:35 -08001750}
1751EXPORT_SYMBOL(tcp_filter);
1752
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001753static void tcp_v4_restore_cb(struct sk_buff *skb)
1754{
1755 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1756 sizeof(struct inet_skb_parm));
1757}
1758
1759static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1760 const struct tcphdr *th)
1761{
 1762	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
 1763	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1764 */
1765 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1766 sizeof(struct inet_skb_parm));
1767 barrier();
1768
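	/* SYN and FIN each occupy one unit of sequence space, hence the
	 * th->syn + th->fin terms when computing end_seq below.
	 */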
1769 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1770 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1771 skb->len - th->doff * 4);
1772 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1773 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1774 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1775 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1776 TCP_SKB_CB(skb)->sacked = 0;
1777 TCP_SKB_CB(skb)->has_rxtstamp =
1778 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1779}
1780
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781/*
1782 * From tcp_input.c
1783 */
1784
1785int tcp_v4_rcv(struct sk_buff *skb)
1786{
Eric Dumazet3b24d852016-04-01 08:52:17 -07001787 struct net *net = dev_net(skb->dev);
David Ahern3fa6f612017-08-07 08:44:17 -07001788 int sdif = inet_sdif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001789 const struct iphdr *iph;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001790 const struct tcphdr *th;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001791 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 struct sock *sk;
1793 int ret;
1794
1795 if (skb->pkt_type != PACKET_HOST)
1796 goto discard_it;
1797
1798 /* Count it even if it's bad */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001799 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
1801 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1802 goto discard_it;
1803
Eric Dumazetea1627c2016-05-13 09:16:40 -07001804 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
Eric Dumazetea1627c2016-05-13 09:16:40 -07001806 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 goto bad_packet;
1808 if (!pskb_may_pull(skb, th->doff * 4))
1809 goto discard_it;
1810
1811 /* An explanation is required here, I think.
1812 * Packet length and doff are validated by header prediction,
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001813	 * provided the case of th->doff==0 is eliminated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 * So, we defer the checks. */
Tom Herberted70fcf2014-05-02 16:29:38 -07001815
1816 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001817 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818
Eric Dumazetea1627c2016-05-13 09:16:40 -07001819 th = (const struct tcphdr *)skb->data;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001820 iph = ip_hdr(skb);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001821lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001822 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
David Ahern3fa6f612017-08-07 08:44:17 -07001823 th->dest, sdif, &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 if (!sk)
1825 goto no_tcp_socket;
1826
Eric Dumazetbb134d52010-03-09 05:55:56 +00001827process:
1828 if (sk->sk_state == TCP_TIME_WAIT)
1829 goto do_time_wait;
1830
Eric Dumazet079096f2015-10-02 11:43:32 -07001831 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1832 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001833 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001834 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001835
1836 sk = req->rsk_listener;
Eric Dumazet72923552016-02-11 22:50:29 -08001837 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001838 sk_drops_add(sk, skb);
Eric Dumazet72923552016-02-11 22:50:29 -08001839 reqsk_put(req);
1840 goto discard_it;
1841 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001842 if (tcp_checksum_complete(skb)) {
1843 reqsk_put(req);
1844 goto csum_error;
1845 }
Eric Dumazet77166822016-02-18 05:39:18 -08001846 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001847 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001848 goto lookup;
1849 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07001850 /* We own a reference on the listener, increase it again
1851 * as we might lose it too soon.
1852 */
Eric Dumazet77166822016-02-18 05:39:18 -08001853 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001854 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001855 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001856 if (!tcp_filter(sk, skb)) {
1857 th = (const struct tcphdr *)skb->data;
1858 iph = ip_hdr(skb);
1859 tcp_v4_fill_cb(skb, iph, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001860 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001861 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001862 if (!nsk) {
1863 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001864 if (req_stolen) {
1865 /* Another cpu got exclusive access to req
1866 * and created a full blown socket.
1867 * Try to feed this packet to this socket
1868 * instead of discarding it.
1869 */
1870 tcp_v4_restore_cb(skb);
1871 sock_put(sk);
1872 goto lookup;
1873 }
Eric Dumazet77166822016-02-18 05:39:18 -08001874 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001875 }
1876 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001877 reqsk_put(req);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001878 tcp_v4_restore_cb(skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001879 } else if (tcp_child_process(sk, nsk, skb)) {
1880 tcp_v4_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001881 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001882 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001883 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001884 return 0;
1885 }
1886 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001887 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001888 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001889 goto discard_and_relse;
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001890 }
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001891
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1893 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001894
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001895 if (tcp_v4_inbound_md5_hash(sk, skb))
1896 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001897
Patrick McHardyb59c2702006-01-06 23:06:10 -08001898 nf_reset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
Eric Dumazetac6e7802016-11-10 13:12:35 -08001900 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001902 th = (const struct tcphdr *)skb->data;
1903 iph = ip_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001904 tcp_v4_fill_cb(skb, iph, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
1906 skb->dev = NULL;
1907
Eric Dumazete994b2f2015-10-02 11:43:39 -07001908 if (sk->sk_state == TCP_LISTEN) {
1909 ret = tcp_v4_do_rcv(sk, skb);
1910 goto put_and_return;
1911 }
1912
1913 sk_incoming_cpu_update(sk);
1914
Ingo Molnarc6366182006-07-03 00:25:13 -07001915 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001916 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917 ret = 0;
1918 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02001919 ret = tcp_v4_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001920 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001921 goto discard_and_relse;
1922 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 bh_unlock_sock(sk);
1924
Eric Dumazete994b2f2015-10-02 11:43:39 -07001925put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001926 if (refcounted)
1927 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928
1929 return ret;
1930
1931no_tcp_socket:
1932 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1933 goto discard_it;
1934
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001935 tcp_v4_fill_cb(skb, iph, th);
1936
Eric Dumazet12e25e12015-06-03 23:49:21 -07001937 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001938csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001939 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001941 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001943 tcp_v4_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 }
1945
1946discard_it:
1947 /* Discard frame. */
1948 kfree_skb(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001949 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950
1951discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001952 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001953 if (refcounted)
1954 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955 goto discard_it;
1956
1957do_time_wait:
1958 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001959 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960 goto discard_it;
1961 }
1962
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001963 tcp_v4_fill_cb(skb, iph, th);
1964
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001965 if (tcp_checksum_complete(skb)) {
1966 inet_twsk_put(inet_twsk(sk));
1967 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 }
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001969 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 case TCP_TW_SYN: {
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001971 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
Craig Galleka5836362016-02-10 11:50:38 -05001972 &tcp_hashinfo, skb,
1973 __tcp_hdrlen(th),
Tom Herbertda5e3632013-01-22 09:50:24 +00001974 iph->saddr, th->source,
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001975 iph->daddr, th->dest,
David Ahern3fa6f612017-08-07 08:44:17 -07001976 inet_iif(skb),
1977 sdif);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978 if (sk2) {
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001979 inet_twsk_deschedule_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 sk = sk2;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001981 tcp_v4_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001982 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 goto process;
1984 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 }
Gustavo A. R. Silvafcfd6df2017-10-16 15:48:55 -05001986 /* to ACK */
1987 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 case TCP_TW_ACK:
1989 tcp_v4_timewait_ack(sk, skb);
1990 break;
1991 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001992 tcp_v4_send_reset(sk, skb);
1993 inet_twsk_deschedule_put(inet_twsk(sk));
1994 goto discard_it;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 case TCP_TW_SUCCESS:;
1996 }
1997 goto discard_it;
1998}
1999
David S. Millerccb7c412010-12-01 18:09:13 -08002000static struct timewait_sock_ops tcp_timewait_sock_ops = {
2001 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2002 .twsk_unique = tcp_twsk_unique,
2003 .twsk_destructor= tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08002004};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
Eric Dumazet63d02d12012-08-09 14:11:00 +00002006void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
Eric Dumazet5d299f32012-08-06 05:09:33 +00002007{
2008 struct dst_entry *dst = skb_dst(skb);
2009
Eric Dumazet5037e9e2015-12-14 14:08:53 -08002010 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -07002011 sk->sk_rx_dst = dst;
2012 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2013 }
Eric Dumazet5d299f32012-08-06 05:09:33 +00002014}
Eric Dumazet63d02d12012-08-09 14:11:00 +00002015EXPORT_SYMBOL(inet_sk_rx_dst_set);
Eric Dumazet5d299f32012-08-06 05:09:33 +00002016
Stephen Hemminger3b401a82009-09-01 19:25:04 +00002017const struct inet_connection_sock_af_ops ipv4_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002018 .queue_xmit = ip_queue_xmit,
2019 .send_check = tcp_v4_send_check,
2020 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00002021 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002022 .conn_request = tcp_v4_conn_request,
2023 .syn_recv_sock = tcp_v4_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002024 .net_header_len = sizeof(struct iphdr),
2025 .setsockopt = ip_setsockopt,
2026 .getsockopt = ip_getsockopt,
2027 .addr2sockaddr = inet_csk_addr2sockaddr,
2028 .sockaddr_len = sizeof(struct sockaddr_in),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002029#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002030 .compat_setsockopt = compat_ip_setsockopt,
2031 .compat_getsockopt = compat_ip_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002032#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04002033 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034};
Eric Dumazet4bc2f182010-07-09 21:22:10 +00002035EXPORT_SYMBOL(ipv4_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002037#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00002038static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002039 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07002040 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002041 .md5_parse = tcp_v4_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002042};
Andrew Mortonb6332e62006-11-30 19:16:28 -08002043#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002044
Linus Torvalds1da177e2005-04-16 15:20:36 -07002045/* NOTE: A lot of things are set to zero explicitly by the call to
 2046 * sk_alloc(), so they need not be done here.
2047 */
2048static int tcp_v4_init_sock(struct sock *sk)
2049{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002050 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051
Neal Cardwell900f65d2012-04-19 09:55:21 +00002052 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08002054 icsk->icsk_af_ops = &ipv4_specific;
Neal Cardwell900f65d2012-04-19 09:55:21 +00002055
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002056#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04002057 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002058#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060 return 0;
2061}
2062
Brian Haley7d06b2e2008-06-14 17:04:49 -07002063void tcp_v4_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064{
2065 struct tcp_sock *tp = tcp_sk(sk);
2066
Song Liue1a4aa52017-10-23 09:20:26 -07002067 trace_tcp_destroy_sock(sk);
2068
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069 tcp_clear_xmit_timers(sk);
2070
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002071 tcp_cleanup_congestion_control(sk);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002072
Dave Watson734942c2017-06-14 11:37:14 -07002073 tcp_cleanup_ulp(sk);
2074
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 /* Cleanup up the write buffer. */
David S. Millerfe067e82007-03-07 12:12:44 -08002076 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
Wei Wangcf1ef3f2017-04-20 14:45:46 -07002078 /* Check if we want to disable active TFO */
2079 tcp_fastopen_active_disable_ofo_check(sk);
2080
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 /* Cleans up our, hopefully empty, out_of_order_queue. */
Yaogong Wang9f5afea2016-09-07 14:49:28 -07002082 skb_rbtree_purge(&tp->out_of_order_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002084#ifdef CONFIG_TCP_MD5SIG
2085 /* Clean up the MD5 key list, if any */
2086 if (tp->md5sig_info) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00002087 tcp_clear_md5_list(sk);
Mat Martineaufb7df5e2017-12-21 10:29:10 -08002088 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002089 tp->md5sig_info = NULL;
2090 }
2091#endif
2092
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 /* Clean up a referenced TCP bind bucket. */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002094 if (inet_csk(sk)->icsk_bind_hash)
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08002095 inet_put_port(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
Ian Morris00db4122015-04-03 09:17:27 +01002097 BUG_ON(tp->fastopen_rsk);
William Allen Simpson435cf552009-12-02 18:17:05 +00002098
Yuchung Chengcf60af02012-07-19 06:43:09 +00002099 /* If socket is aborted during connect operation */
2100 tcp_free_fastopen_req(tp);
Yuchung Cheng1fba70e2017-10-18 11:22:51 -07002101 tcp_fastopen_destroy_cipher(sk);
Eric Dumazetcd8ae852015-05-03 21:34:46 -07002102 tcp_saved_syn_free(tp);
Yuchung Chengcf60af02012-07-19 06:43:09 +00002103
Glauber Costa180d8cd2011-12-11 21:47:02 +00002104 sk_sockets_allocated_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106EXPORT_SYMBOL(tcp_v4_destroy_sock);
2107
2108#ifdef CONFIG_PROC_FS
2109/* Proc filesystem TCP sock list dumping. */
2110
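/* /proc/net/tcp is produced in two passes: the listening hash buckets first
 * (TCP_SEQ_STATE_LISTENING), then every established/timewait bucket in ehash
 * (TCP_SEQ_STATE_ESTABLISHED).  st->bucket, st->offset and st->last_pos let a
 * read() split across calls resume roughly where it left off via
 * tcp_seek_last_pos().
 */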
Tom Herberta8b690f2010-06-07 00:43:42 -07002111/*
 2112 * Get next listener socket following cur. If cur is NULL, get the first socket
2113 * starting from bucket given in st->bucket; when st->bucket is zero the
2114 * very first socket in the hash table is returned.
2115 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116static void *listening_get_next(struct seq_file *seq, void *cur)
2117{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002118 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002119 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002120 struct net *net = seq_file_net(seq);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002121 struct inet_listen_hashbucket *ilb;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002122 struct sock *sk = cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
2124 if (!sk) {
Eric Dumazet3b24d852016-04-01 08:52:17 -07002125get_head:
Tom Herberta8b690f2010-06-07 00:43:42 -07002126 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Eric Dumazet9652dc22016-10-19 21:24:58 -07002127 spin_lock(&ilb->lock);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002128 sk = sk_head(&ilb->head);
Tom Herberta8b690f2010-06-07 00:43:42 -07002129 st->offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 goto get_sk;
2131 }
Eric Dumazet5caea4e2008-11-20 00:40:07 -08002132 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002134 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
Eric Dumazet3b24d852016-04-01 08:52:17 -07002136 sk = sk_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137get_sk:
Eric Dumazet3b24d852016-04-01 08:52:17 -07002138 sk_for_each_from(sk) {
Pavel Emelyanov8475ef92010-11-22 03:26:12 +00002139 if (!net_eq(sock_net(sk), net))
2140 continue;
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002141 if (sk->sk_family == afinfo->family)
Eric Dumazet3b24d852016-04-01 08:52:17 -07002142 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 }
Eric Dumazet9652dc22016-10-19 21:24:58 -07002144 spin_unlock(&ilb->lock);
Tom Herberta8b690f2010-06-07 00:43:42 -07002145 st->offset = 0;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002146 if (++st->bucket < INET_LHTABLE_SIZE)
2147 goto get_head;
2148 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149}
2150
2151static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2152{
Tom Herberta8b690f2010-06-07 00:43:42 -07002153 struct tcp_iter_state *st = seq->private;
2154 void *rc;
2155
2156 st->bucket = 0;
2157 st->offset = 0;
2158 rc = listening_get_next(seq, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159
2160 while (rc && *pos) {
2161 rc = listening_get_next(seq, rc);
2162 --*pos;
2163 }
2164 return rc;
2165}
2166
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002167static inline bool empty_bucket(const struct tcp_iter_state *st)
Andi Kleen6eac5602008-08-28 01:08:02 -07002168{
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002169 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
Andi Kleen6eac5602008-08-28 01:08:02 -07002170}
2171
Tom Herberta8b690f2010-06-07 00:43:42 -07002172/*
2173 * Get first established socket starting from bucket given in st->bucket.
2174 * If st->bucket is zero, the very first socket in the hash is returned.
2175 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176static void *established_get_first(struct seq_file *seq)
2177{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002178 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002179 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002180 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 void *rc = NULL;
2182
Tom Herberta8b690f2010-06-07 00:43:42 -07002183 st->offset = 0;
2184 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 struct sock *sk;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002186 struct hlist_nulls_node *node;
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002187 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Andi Kleen6eac5602008-08-28 01:08:02 -07002189 /* Lockless fast path for the common case of empty buckets */
2190 if (empty_bucket(st))
2191 continue;
2192
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002193 spin_lock_bh(lock);
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002194 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002195 if (sk->sk_family != afinfo->family ||
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002196 !net_eq(sock_net(sk), net)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 continue;
2198 }
2199 rc = sk;
2200 goto out;
2201 }
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002202 spin_unlock_bh(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 }
2204out:
2205 return rc;
2206}
2207
2208static void *established_get_next(struct seq_file *seq, void *cur)
2209{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002210 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 struct sock *sk = cur;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002212 struct hlist_nulls_node *node;
Jianjun Kong5799de02008-11-03 02:49:10 -08002213 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002214 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
2216 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002217 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002219 sk = sk_nulls_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002221 sk_nulls_for_each_from(sk, node) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002222 if (sk->sk_family == afinfo->family &&
2223 net_eq(sock_net(sk), net))
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002224 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225 }
2226
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002227 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2228 ++st->bucket;
2229 return established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002230}
2231
2232static void *established_get_idx(struct seq_file *seq, loff_t pos)
2233{
Tom Herberta8b690f2010-06-07 00:43:42 -07002234 struct tcp_iter_state *st = seq->private;
2235 void *rc;
2236
2237 st->bucket = 0;
2238 rc = established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239
2240 while (rc && pos) {
2241 rc = established_get_next(seq, rc);
2242 --pos;
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02002243 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 return rc;
2245}
2246
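/*
 * Position the iterator at entry 'pos': listening sockets are counted
 * first, then established and timewait sockets.
 */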
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

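/*
 * Fast resume path: when a reader comes back at exactly the position it
 * stopped at, restart the walk from the cached bucket/offset instead of
 * rescanning the hash tables from the beginning.  st->num is preserved so
 * row numbering stays consistent across read() chunks.
 */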
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

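/*
 * seq_file callbacks for the TCP /proc listings.  ->start() tries the fast
 * resume path above before falling back to a full reposition; ->stop()
 * drops whichever bucket lock is still held.  They are exported so the
 * IPv6 code can reuse the same iterator.
 */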
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

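/*
 * The three helpers below each format one row of /proc/net/tcp: one for
 * SYN_RECV request sockets, one for full sockets and one for TIME_WAIT
 * minisockets.  tcp4_seq_show() dispatches on sk_state, so all three must
 * emit the same column layout.
 */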
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active = 2;
		timer_expires = sk->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

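/*
 * Emit one fixed-width row (TMPSZ - 1 characters) per socket.  As an
 * illustration only (the field values below are made up), a LISTEN socket
 * bound to 127.0.0.1:3302 would look roughly like this on a little-endian
 * host:
 *
 *    0: 0100007F:0CE6 00000000:0000 0A 00000000:00000000 00:00000000 00000000  1000        0 18083 1 0000000000000000 100 0 0 10 0
 *
 * Addresses are the raw __be32 printed with %08X (hence byte-swapped on
 * little-endian), ports are host-order hex, and "0A" is TCP_LISTEN.
 */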
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

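/*
 * Per-namespace registration of /proc/net/tcp.  The afinfo pointer stored
 * as the proc entry's data is what the shared iterator reads back via
 * PDE_DATA() to restrict the walk to AF_INET sockets.
 */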
static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

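/*
 * The AF_INET TCP protocol descriptor: the table of hooks through which the
 * generic socket layer dispatches into TCP for IPv4 sockets.
 */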
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

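/*
 * Per-network-namespace setup and teardown: each namespace gets one percpu
 * control socket (used to send RSTs and ACKs that have no full socket of
 * their own) plus its own copy of the TCP sysctl defaults.
 */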
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

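/* Boot-time entry point, called from inet_init(). */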
void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}