// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *	David S. Miller		:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *	David S. Miller		:	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *	Andi Kleen		:	Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *	Andi Kleen		:	Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *	Mike McLagan		:	Routing by source
 *	Juan Jose Ciarlante	:	ip_dynaddr bits
 *	Andi Kleen		:	various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct inet_timewait_sock *tw = inet_twsk(sktw);
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);
	int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

	if (reuse == 2) {
		/* Still does not detect *everything* that goes through
		 * lo, since we require a loopback src or dst address
		 * or direct binding to the 'lo' interface.
		 */
		bool loopback = false;
		if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
			loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == AF_INET6) {
			if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
			     (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
			    ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
			    (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
			     (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
				loopback = true;
		} else
#endif
		{
			if (ipv4_is_loopback(tw->tw_daddr) ||
			    ipv4_is_loopback(tw->tw_rcv_saddr))
				loopback = true;
		}
		if (!loopback)
			reuse = 0;
	}

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap, i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp cache
	   is held not per host, but per port pair, and the TW bucket is used
	   as state holder.

	   If the TW bucket has already been destroyed we fall back to VJ's
	   scheme and use the initial timestamp retrieved from the peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (reuse && time_after32(ktime_get_seconds(),
					    tcptw->tw_ts_recent_stamp)))) {
		/* In case of repair and re-using TIME-WAIT sockets we still
		 * want to be sure that it is safe as above but honor the
		 * sequence numbers and time stamps set as part of the repair
		 * process.
		 *
		 * Without this check re-using a TIME-WAIT socket with TCP
		 * repair would accumulate a -1 on the repair assigned
		 * sequence number. The first time it is reused the sequence
		 * is -1, the second time -2, etc. This fixes that issue
		 * without appearing to create any others.
		 */
		if (likely(!tp->repair)) {
			tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
			if (tp->write_seq == 0)
				tp->write_seq = 1;
			tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
			tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		}
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
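
/* Usage sketch (illustrative, not part of the original file): the "reuse"
 * value tested above comes from the net.ipv4.tcp_tw_reuse sysctl, e.g.
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=2
 *
 * 0 disables reuse, 1 allows any timestamp-safe reuse, and 2 (the
 * loopback-only mode handled first above) allows it only when a loopback
 * address, or a socket bound to 'lo', is involved, so bursts of local
 * connect() calls are not stalled by exhausted TIME-WAIT port pairs.
 */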

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent the BPF program called below from accessing bytes that
	 * are out of the bound specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     lockdep_sock_is_held(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	sk_rcv_saddr_set(sk, inet->inet_saddr);

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent	   = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq	   = 0;
	}

	inet->inet_dport = usin->sin_port;
	sk_daddr_set(sk, daddr);

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables
	 * and complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(tcp_death_row, sk);
	if (err)
		goto failure;

	sk_set_txhash(sk);

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);
	rt = NULL;

	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			tp->write_seq = secure_tcp_seq(inet->inet_saddr,
						       inet->inet_daddr,
						       inet->inet_sport,
						       usin->sin_port);
		tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
						 inet->inet_saddr,
						 inet->inet_daddr);
	}

	inet->inet_id = tp->write_seq ^ jiffies;

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto failure;

	err = tcp_connect(sk);

	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
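
/* Usage sketch (illustrative userspace counterpart of the function above):
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in dst = {
 *		.sin_family = AF_INET,
 *		.sin_port   = htons(80),
 *	};
 *
 *	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *	connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *
 * connect() reaches tcp_v4_connect() through inet_stream_connect(); the
 * route lookup, source port selection (inet_hash_connect()) and the
 * initial SYN (tcp_connect()) all run before connect() returns or blocks
 * waiting for the handshake.
 */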

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * for the case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition. If err < 0 then the socket should
 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment
 * header points to the first 8 bytes of the tcp header. We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return -ENOENT;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
				     type == ICMP_TIME_EXCEEDED ||
				     (type == ICMP_DEST_UNREACH &&
				      (code == ICMP_NET_UNREACH ||
				       code == ICMP_HOST_UNREACH)));
		return 0;
	}

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of the PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while the socket
	 * is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always < 576 bytes so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		skb = tcp_rtx_queue_head(sk);
		if (WARN_ON_ONCE(!skb))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);


		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now. */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows considering as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit, sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
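
/* A condensed sketch of the RTO revert arithmetic used above, assuming a
 * valid srtt sample (names below are illustrative only, not kernel APIs):
 *
 *	icsk->icsk_backoff--;
 *	base_rto = __tcp_set_rto(tp);	(srtt/8 + rttvar, in jiffies)
 *	icsk->icsk_rto = min(base_rto << icsk->icsk_backoff, TCP_RTO_MAX);
 *	remaining = icsk->icsk_rto - time_since_last_rtx;
 *
 * If the un-backed-off timer would already have expired (remaining <= 0),
 * the head of the retransmit queue is resent immediately; otherwise the
 * timer is re-armed with the shortened remaining interval.
 */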

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
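
/* Checksum note (a rough sketch, ignoring carry and byte-order details):
 * the ~tcp_v4_check(len, saddr, daddr, 0) above seeds th->check with only
 * the folded pseudo-header sum; the device (or skb_checksum_help()) later
 * adds the TCP header and payload from skb->csum_start and writes the
 * final checksum at csum_offset. The pseudo-header part is roughly:
 *
 *	sum = (saddr >> 16) + (saddr & 0xffff) +
 *	      (daddr >> 16) + (daddr & 0xffff) +
 *	      htons(IPPROTO_TCP) + htons(len);
 *	while (sum >> 16)
 *		sum = (sum & 0xffff) + (sum >> 16);
 *	th->check = sum;	(deliberately not inverted yet)
 */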

/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just a duplicate segment or a bug in the other
 *		side's TCP. So we build the reply based only on the
 *		parameters arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;
	struct sock *ctl_sk;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped
	 * our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not lose security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't
		 * match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	if (sk) {
		ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
				   inet_twsk(sk)->tw_mark : sk->sk_mark;
		tcp_set_tx_time(skb, sk);
	}
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside of socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;
	struct sock *ctl_sk;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
	ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
			   inet_twsk(sk)->tw_mark : sk->sk_mark;
	tcp_set_tx_time(skb, sk);
	ip_send_unicast_reply(ctl_sk,
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	ctl_sk->sk_mark = 0;
	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
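
/* Worked example for the RFC 7323 shift above (illustrative numbers):
 * with rcv_wscale == 7 and rsk_rcv_wnd == 1000000 bytes, the ACK
 * advertises 1000000 >> 7 == 7812, which the peer scales back up to
 * 7812 << 7 == 999936 bytes. Only <SYN> segments carry an unshifted
 * window field.
 */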

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		rcu_read_lock();
		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    rcu_dereference(ireq->ireq_opt));
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

/* Find the Key structure for an address. */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
					   const union tcp_md5_addr *addr,
					   int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);
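
/* Best-match example for the lookup above (illustrative addresses):
 * with keys installed for 10.0.0.0/8 and 10.1.0.0/16, a peer address
 * of 10.1.2.3 matches both, and the /16 key wins because its
 * key->prefixlen is larger; an exact /32 key for 10.1.2.3 would win
 * over both.
 */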

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);
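
/* In-kernel usage sketch (mirrors tcp_v4_parse_md5_keys() below;
 * peer_addr, key_bytes and key_len are illustrative placeholders):
 *
 *	union tcp_md5_addr a = { .a4.s_addr = peer_addr };
 *	int err = tcp_md5_do_add(sk, &a, AF_INET, 32,
 *				 key_bytes, key_len, GFP_KERNEL);
 *
 * A /32 prefix pins the key to a single peer. The socket lock must be
 * held, as the rcu_dereference_protected() above asserts; note that the
 * first key also disables GSO via sk_nocaps_add(), since MD5-signed
 * segments must be produced one at a time.
 */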

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}

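/* Illustrative sketch (not compiled here): from userspace the key-management
 * code above is driven through setsockopt().  A hypothetical client that
 * wants MD5-protected TCP to peer 192.0.2.1 could do roughly:
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	sin->sin_addr.s_addr = inet_addr("192.0.2.1");
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", md5.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key (tcp_md5_do_del() above), and
 * TCP_MD5SIG_EXT with TCP_MD5SIG_FLAG_PREFIX set lets tcpm_prefixlen cover
 * a whole subnet.  The address 192.0.2.1 and key "secret" are placeholders.
 */
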
static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   __be32 daddr, __be32 saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp4_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);

	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}

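/* For reference: RFC 2385 defines the digest input as the IPv4 pseudo-header,
 * the TCP header with its checksum zeroed (options excluded), the segment
 * data, and finally the connection key.  tcp_v4_md5_hash_skb() below feeds
 * the hash in exactly that order; the header-only variant
 * tcp_v4_md5_hash_hdr() skips the data step since it signs dataless packets.
 */
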
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}

int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
			const struct sock *sk,
			const struct sk_buff *skb)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 saddr, daddr;

	if (sk) { /* valid for established/request sockets */
		saddr = sk->sk_rcv_saddr;
		daddr = sk->sk_daddr;
	} else {
		const struct iphdr *iph = ip_hdr(skb);
		saddr = iph->saddr;
		daddr = iph->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
	/*
	 * This gets called for each TCP segment that arrives,
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct iphdr *iph = ip_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash;
	unsigned char newhash[16];

	hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
					  AF_INET);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* Okay, so this is hash_expected and hash_location -
	 * so we need to calculate the checksum.
	 */
	genhash = tcp_v4_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
				     &iph->saddr, ntohs(th->source),
				     &iph->daddr, ntohs(th->dest),
				     genhash ? " tcp_v4_calc_md5_hash failed"
					     : "");
		return true;
	}
	return false;
#endif
	return false;
}

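/* On the wire, the signature checked above travels as TCP option kind 19,
 * length 18: two bytes of kind/length followed by the 16-byte MD5 digest
 * (RFC 2385).  tcp_parse_md5sig_option() is what locates it in the header.
 */
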
static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct net *net = sock_net(sk_listener);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
}

static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req)
{
	return inet_csk_route_req(sk, &fl->u.ip4, req);
}

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_seq,
	.init_ts_off	=	tcp_v4_init_ts_off,
	.send_synack	=	tcp_v4_send_synack,
};

int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer SYNs sent to broadcast or multicast addresses */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}
EXPORT_SYMBOL(tcp_v4_conn_request);


/*
 * The three-way handshake has completed - we received the final valid ACK -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst,
				  struct request_sock *req_unhash,
				  bool *own_req)
{
	struct inet_request_sock *ireq;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct ip_options_rcu *inet_opt;

	if (sk_acceptq_is_full(sk))
		goto exit_overflow;

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto exit_nonewsk;

	newsk->sk_gso_type = SKB_GSO_TCPV4;
	inet_sk_rx_dst_set(newsk, skb);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	ireq = inet_rsk(req);
	sk_daddr_set(newsk, ireq->ir_rmt_addr);
	sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
	newsk->sk_bound_dev_if = ireq->ir_iif;
	newinet->inet_saddr = ireq->ir_loc_addr;
	inet_opt = rcu_dereference(ireq->ireq_opt);
	RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
	newinet->mc_index = inet_iif(skb);
	newinet->mc_ttl = ip_hdr(skb)->ttl;
	newinet->rcv_tos = ip_hdr(skb)->tos;
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
	newinet->inet_id = newtp->write_seq ^ jiffies;

	if (!dst) {
		dst = inet_csk_route_child_sock(sk, newsk, req);
		if (!dst)
			goto put_and_exit;
	} else {
		/* syncookie case : see end of cookie_v4_check() */
	}
	sk_setup_caps(newsk, dst);

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
				AF_INET);
	if (key) {
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
			       AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
		sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0)
		goto put_and_exit;
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (likely(*own_req)) {
		tcp_move_syn(newtp, req);
		ireq->ireq_opt = NULL;
	} else {
		newinet->inet_opt = NULL;
	}
	return newsk;

exit_overflow:
	NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
	dst_release(dst);
exit:
	tcp_listendrop(sk);
	return NULL;
put_and_exit:
	newinet->inet_opt = NULL;
	inet_csk_prepare_forced_close(newsk);
	tcp_done(newsk);
	goto exit;
}
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);

static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct sock *rsk;

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    !dst->ops->check(dst, 0)) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}
		tcp_rcv_established(sk, skb);
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v4_cookie_check(sk, skb);

		if (!nsk)
			goto discard;
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb)) {
				rsk = nsk;
				goto reset;
			}
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb)) {
		rsk = sk;
		goto reset;
	}
	return 0;

reset:
	tcp_v4_send_reset(rsk, skb);
discard:
	kfree_skb(skb);
	/* Be careful here. If this function gets more complicated and
	 * gcc suffers from register pressure on the x86, sk (in %ebx)
	 * might be destroyed here. This current version compiles correctly,
	 * but you have been warned.
	 */
	return 0;

csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;
}
EXPORT_SYMBOL(tcp_v4_do_rcv);

int tcp_v4_early_demux(struct sk_buff *skb)
{
	const struct iphdr *iph;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return 0;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return 0;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return 0;

	sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
				       iph->saddr, th->source,
				       iph->daddr, ntohs(th->dest),
				       skb->skb_iif, inet_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, 0);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
	return 0;
}

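/* Aside (a hedged operational note, not code): early demux is optional and
 * can be switched off at runtime when, e.g., asymmetric routing makes the
 * cached-socket lookup counter-productive:
 *
 *	sysctl -w net.ipv4.tcp_early_demux=0
 */
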
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
{
	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
	struct skb_shared_info *shinfo;
	const struct tcphdr *th;
	struct tcphdr *thtail;
	struct sk_buff *tail;
	unsigned int hdrlen;
	bool fragstolen;
	u32 gso_segs;
	int delta;

	/* In case all data was pulled from skb frags (in __pskb_pull_tail()),
	 * we can fix skb->truesize to its real value to avoid future drops.
	 * This is valid because skb is not yet charged to the socket.
	 * It has been noticed that pure SACK packets were sometimes dropped
	 * (if cooked by drivers without the copybreak feature).
	 */
	skb_condense(skb);

	skb_dst_drop(skb);

	if (unlikely(tcp_checksum_complete(skb))) {
		bh_unlock_sock(sk);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
		__TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
		return true;
	}

	/* Attempt coalescing to last skb in backlog, even if we are
	 * above the limits.
	 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
	 */
	th = (const struct tcphdr *)skb->data;
	hdrlen = th->doff * 4;
	shinfo = skb_shinfo(skb);

	if (!shinfo->gso_size)
		shinfo->gso_size = skb->len - hdrlen;

	if (!shinfo->gso_segs)
		shinfo->gso_segs = 1;

	tail = sk->sk_backlog.tail;
	if (!tail)
		goto no_coalesce;
	thtail = (struct tcphdr *)tail->data;

	if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
	    TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
	    ((TCP_SKB_CB(tail)->tcp_flags |
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
	    !((TCP_SKB_CB(tail)->tcp_flags &
	      TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
	    ((TCP_SKB_CB(tail)->tcp_flags ^
	      TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
#ifdef CONFIG_TLS_DEVICE
	    tail->decrypted != skb->decrypted ||
#endif
	    thtail->doff != th->doff ||
	    memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
		goto no_coalesce;

	__skb_pull(skb, hdrlen);
	if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
		thtail->window = th->window;

		TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;

		if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
			TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;

		/* We have to update both TCP_SKB_CB(tail)->tcp_flags and
		 * thtail->fin, so that the fast path in tcp_rcv_established()
		 * is not entered if we append a packet with a FIN.
		 * SYN, RST, URG are not present.
		 * ACK is set on both packets.
		 * PSH : we do not really care in TCP stack,
		 * at least for 'GRO' packets.
		 */
		thtail->fin |= th->fin;
		TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;

		if (TCP_SKB_CB(skb)->has_rxtstamp) {
			TCP_SKB_CB(tail)->has_rxtstamp = true;
			tail->tstamp = skb->tstamp;
			skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
		}

		/* Not as strict as GRO. We only need to carry the max mss value */
		skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
						 skb_shinfo(tail)->gso_size);

		gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
		skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);

		sk->sk_backlog.len += delta;
		__NET_INC_STATS(sock_net(sk),
				LINUX_MIB_TCPBACKLOGCOALESCE);
		kfree_skb_partial(skb, fragstolen);
		return false;
	}
	__skb_push(skb, hdrlen);

no_coalesce:
	/* Only the socket owner can try to collapse/prune rx queues
	 * to reduce memory overhead, so add a little headroom here.
	 * Only a few sockets are likely to have a non-empty backlog
	 * concurrently.
	 */
	limit += 64 * 1024;

	if (unlikely(sk_add_backlog(sk, skb, limit))) {
		bh_unlock_sock(sk);
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
		return true;
	}
	return false;
}
EXPORT_SYMBOL(tcp_add_backlog);

int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;

	return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);

static void tcp_v4_restore_cb(struct sk_buff *skb)
{
	memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
		sizeof(struct inet_skb_parm));
}

static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IPCB to its correct location into
	 * TCP_SKB_CB(). barrier() makes sure the compiler won't play
	 * fool^Waliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
		sizeof(struct inet_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff * 4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}

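/* Worked example for the end_seq arithmetic above (illustrative numbers):
 * a segment with seq = 1000 carrying 100 bytes of payload and the FIN flag
 * gets end_seq = 1000 + 0 (syn) + 1 (fin) + 100 = 1101, since SYN and FIN
 * each consume one unit of sequence space in addition to the payload.
 */
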
/*
 * From tcp_input.c
 */

int tcp_v4_rcv(struct sk_buff *skb)
{
	struct net *net = dev_net(skb->dev);
	struct sk_buff *skb_to_free;
	int sdif = inet_sdif(skb);
	const struct iphdr *iph;
	const struct tcphdr *th;
	bool refcounted;
	struct sock *sk;
	int ret;

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/* Count it even if it's bad */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff * 4))
		goto discard_it;

	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */

	if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
		goto csum_error;

	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
lookup:
	sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
			       th->dest, sdif, &refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		/* We own a reference on the listener, increase it again
		 * as we might lose it too soon.
		 */
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			th = (const struct tcphdr *)skb->data;
			iph = ip_hdr(skb);
			tcp_v4_fill_cb(skb, iph, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v4_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v4_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v4_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v4_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	nf_reset(skb);

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	iph = ip_hdr(skb);
	tcp_v4_fill_cb(skb, iph, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v4_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v4_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	if (skb_to_free)
		__kfree_skb(skb_to_free);

put_and_return:
	if (refcounted)
		sock_put(sk);

	return ret;

no_tcp_socket:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v4_send_reset(NULL, skb);
	}

discard_it:
	/* Discard frame. */
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v4_fill_cb(skb, iph, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}
	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN: {
		struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
							&tcp_hashinfo, skb,
							__tcp_hdrlen(th),
							iph->saddr, th->source,
							iph->daddr, th->dest,
							inet_iif(skb),
							sdif);
		if (sk2) {
			inet_twsk_deschedule_put(inet_twsk(sk));
			sk = sk2;
			tcp_v4_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v4_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v4_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:;
	}
	goto discard_it;
}

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}
EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(tp->fastopen_rsk);

	/* If the socket is aborted during a connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

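/* For orientation: the seq_file machinery below backs /proc/net/tcp, whose
 * header line (reproduced from memory, may differ slightly by version) is
 *
 *   sl  local_address rem_address   st tx_queue rx_queue tr tm->when retrnsmt   uid  timeout inode
 *
 * with one socket per subsequent line - listeners first, then established
 * and other states, mirroring the two iteration phases implemented below.
 */
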
/*
 * Get the next listener socket following cur. If cur is NULL, get the first
 * socket starting from the bucket given in st->bucket; when st->bucket is
 * zero the very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	struct inet_listen_hashbucket *ilb;
	struct sock *sk = cur;

	if (!sk) {
get_head:
		ilb = &tcp_hashinfo.listening_hash[st->bucket];
		spin_lock(&ilb->lock);
		sk = sk_head(&ilb->head);
		st->offset = 0;
		goto get_sk;
	}
	ilb = &tcp_hashinfo.listening_hash[st->bucket];
	++st->num;
	++st->offset;

	sk = sk_next(sk);
get_sk:
	sk_for_each_from(sk) {
		if (!net_eq(sock_net(sk), net))
			continue;
		if (sk->sk_family == afinfo->family)
			return sk;
	}
	spin_unlock(&ilb->lock);
	st->offset = 0;
	if (++st->bucket < INET_LHTABLE_SIZE)
		goto get_head;
	return NULL;
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
        struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        void *rc = NULL;

        st->offset = 0;
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

                /* Lockless fast path for the common case of empty buckets */
                if (empty_bucket(st))
                        continue;

                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != afinfo->family ||
                            !net_eq(sock_net(sk), net)) {
                                continue;
                        }
                        rc = sk;
                        goto out;
                }
                spin_unlock_bh(lock);
        }
out:
        return rc;
}

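/* Advance to the next socket in the established hash.  The current
 * ehash bucket lock is held on entry (taken by established_get_first());
 * when the walk falls off the end of the bucket, the lock is dropped
 * here and established_get_first() locks the next non-empty bucket.
 */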
static void *established_get_next(struct seq_file *seq, void *cur)
{
        struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
        struct sock *sk = cur;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        ++st->num;
        ++st->offset;

        sk = sk_nulls_next(sk);

        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == afinfo->family &&
                    net_eq(sock_net(sk), net))
                        return sk;
        }

        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
        ++st->bucket;
        return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        rc = established_get_first(seq);

        while (rc && pos) {
                rc = established_get_next(seq, rc);
                --pos;
        }
        return rc;
}

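/* Resolve an absolute index into a socket: exhaust the listening hash
 * first, then continue into the established hash.  This ordering is why
 * listening sockets appear first in /proc/net/tcp.
 */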
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
        void *rc;
        struct tcp_iter_state *st = seq->private;

        st->state = TCP_SEQ_STATE_LISTENING;
        rc = listening_get_idx(seq, &pos);

        if (!rc) {
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc = established_get_idx(seq, pos);
        }

        return rc;
}

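/* Fast resume path for sequential reads: rather than re-walking both
 * tables from the start, restart from the bucket and in-bucket offset
 * cached in tcp_iter_state.  st->num is restored afterwards so the
 * printed "sl" numbering stays continuous.
 */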
static void *tcp_seek_last_pos(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        int offset = st->offset;
        int orig_num = st->num;
        void *rc = NULL;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                if (st->bucket >= INET_LHTABLE_SIZE)
                        break;
                st->state = TCP_SEQ_STATE_LISTENING;
                rc = listening_get_next(seq, NULL);
                while (offset-- && rc)
                        rc = listening_get_next(seq, rc);
                if (rc)
                        break;
                st->bucket = 0;
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                /* Fallthrough */
        case TCP_SEQ_STATE_ESTABLISHED:
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        break;
                rc = established_get_first(seq);
                while (offset-- && rc)
                        rc = established_get_next(seq, rc);
        }

        st->num = orig_num;

        return rc;
}

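/* seq_file ->start() callback.  When *pos matches the position cached
 * by the previous iteration, resume via tcp_seek_last_pos(); otherwise
 * fall back to a full walk.  A zero *pos returns SEQ_START_TOKEN so
 * ->show() emits the header line first.
 */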
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        if (*pos && *pos == st->last_pos) {
                rc = tcp_seek_last_pos(seq);
                if (rc)
                        goto out;
        }

        st->state = TCP_SEQ_STATE_LISTENING;
        st->num = 0;
        st->bucket = 0;
        st->offset = 0;
        rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
        st->last_pos = *pos;
        return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

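/* seq_file ->next() callback: step to the following socket, switching
 * from the listening to the established table once the former is
 * exhausted, and cache the new position for the resume fast path.
 */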
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc = NULL;

        if (v == SEQ_START_TOKEN) {
                rc = tcp_get_idx(seq, 0);
                goto out;
        }

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        st->bucket = 0;
                        st->offset = 0;
                        rc = established_get_first(seq);
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
                rc = established_get_next(seq, v);
                break;
        }
out:
        ++*pos;
        st->last_pos = *pos;
        return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

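/* seq_file ->stop() callback: release whichever bucket lock the
 * iterator still holds, matching the locking in listening_get_next()
 * and established_get_first().
 */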
void tcp_seq_stop(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
                        spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                break;
        }
}
EXPORT_SYMBOL(tcp_seq_stop);

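/* Print one request socket (SYN-RECV) in the fixed /proc/net/tcp
 * record format: hex addresses/ports, the SYN-ACK retransmit timer as
 * the only active timer, and the listening socket's uid.  Request
 * socks have no inode, so 0 is printed there.
 */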
static void get_openreq4(const struct request_sock *req,
                         struct seq_file *f, int i)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        long delta = req->rsk_timer.expires - jiffies;

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
                i,
                ireq->ir_loc_addr,
                ireq->ir_num,
                ireq->ir_rmt_addr,
                ntohs(ireq->ir_rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
                jiffies_delta_to_clock_t(delta),
                req->num_timeout,
                from_kuid_munged(seq_user_ns(f),
                                 sock_i_uid(req->rsk_listener)),
                0,  /* non standard timer */
                0,  /* open_requests have no inode */
                0,
                req);
}

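/* Print one full TCP socket.  The "tr" (timer_active) code computed
 * below follows the /proc/net/tcp convention: 1 for retransmit-class
 * timers (RTO, RACK reorder, loss probe), 4 for the zero-window probe
 * timer, 2 when only sk_timer (keepalive) is pending, 0 when no timer
 * is armed.  Timewait sockets report 3 (see get_timewait4_sock()).
 */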
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
        int timer_active;
        unsigned long timer_expires;
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_sock *inet = inet_sk(sk);
        const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
        __be32 dest = inet->inet_daddr;
        __be32 src = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
        __u16 srcp = ntohs(inet->inet_sport);
        int rx_queue;
        int state;

        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sk->sk_timer)) {
                timer_active = 2;
                timer_expires = sk->sk_timer.expires;
        } else {
                timer_active = 0;
                timer_expires = jiffies;
        }

        state = inet_sk_state_load(sk);
        if (state == TCP_LISTEN)
                rx_queue = sk->sk_ack_backlog;
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
                i, src, srcp, dest, destp, state,
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
                jiffies_delta_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                refcount_read(&sk->sk_refcnt), sk,
                jiffies_to_clock_t(icsk->icsk_rto),
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
                tp->snd_cwnd,
                state == TCP_LISTEN ?
                    fastopenq->max_qlen :
                    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

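/* Print one TIME-WAIT socket.  Most per-connection fields are
 * meaningless in this state and are printed as zero; timer code 3 is
 * reported together with the remaining timewait timer.
 */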
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
                               struct seq_file *f, int i)
{
        long delta = tw->tw_timer.expires - jiffies;
        __be32 dest, src;
        __u16 destp, srcp;

        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
                3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                refcount_read(&tw->tw_refcnt), tw);
}

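/* Each /proc/net/tcp record is padded to a fixed width: TMPSZ - 1
 * payload characters plus the newline added by seq_pad(), so every
 * line has a constant length.
 */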
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        struct sock *sk = v;

        seq_setwidth(seq, TMPSZ - 1);
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode");
                goto out;
        }
        st = seq->private;

        if (sk->sk_state == TCP_TIME_WAIT)
                get_timewait4_sock(v, seq, st->num);
        else if (sk->sk_state == TCP_NEW_SYN_RECV)
                get_openreq4(v, seq, st->num);
        else
                get_tcp4_sock(v, seq, st->num);
out:
        seq_pad(seq, '\n');
        return 0;
}

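/* Glue for the seq_file core, which drives a read roughly like the
 * sketch below (simplified; the real loop also handles buffer
 * overflow and restarts):
 *
 *      p = ops->start(seq, &pos);
 *      while (p) {
 *              ops->show(seq, p);
 *              p = ops->next(seq, p, &pos);
 *      }
 *      ops->stop(seq, p);
 *
 * tcp4_seq_afinfo limits the walk to AF_INET sockets.
 */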
static const struct seq_operations tcp4_seq_ops = {
        .show   = tcp4_seq_show,
        .start  = tcp_seq_start,
        .next   = tcp_seq_next,
        .stop   = tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
        .family = AF_INET,
};

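/* Create /proc/net/tcp in each network namespace.  The per-open
 * iterator state is allocated by proc_create_net_data(), and the
 * iterators recover the afinfo pointer via
 * PDE_DATA(file_inode(seq->file)).
 */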
static int __net_init tcp4_proc_init_net(struct net *net)
{
        if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
                        sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
                return -ENOMEM;
        return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
        remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
        .init = tcp4_proc_init_net,
        .exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
        return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
        unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

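/* The transport-protocol descriptor for IPv4 TCP: inet_create()
 * attaches it to AF_INET SOCK_STREAM sockets, and the af_inet socket
 * layer dispatches into TCP through this table.
 */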
struct proto tcp_prot = {
        .name                   = "TCP",
        .owner                  = THIS_MODULE,
        .close                  = tcp_close,
        .pre_connect            = tcp_v4_pre_connect,
        .connect                = tcp_v4_connect,
        .disconnect             = tcp_disconnect,
        .accept                 = inet_csk_accept,
        .ioctl                  = tcp_ioctl,
        .init                   = tcp_v4_init_sock,
        .destroy                = tcp_v4_destroy_sock,
        .shutdown               = tcp_shutdown,
        .setsockopt             = tcp_setsockopt,
        .getsockopt             = tcp_getsockopt,
        .keepalive              = tcp_set_keepalive,
        .recvmsg                = tcp_recvmsg,
        .sendmsg                = tcp_sendmsg,
        .sendpage               = tcp_sendpage,
        .backlog_rcv            = tcp_v4_do_rcv,
        .release_cb             = tcp_release_cb,
        .hash                   = inet_hash,
        .unhash                 = inet_unhash,
        .get_port               = inet_csk_get_port,
        .enter_memory_pressure  = tcp_enter_memory_pressure,
        .leave_memory_pressure  = tcp_leave_memory_pressure,
        .stream_memory_free     = tcp_stream_memory_free,
        .sockets_allocated      = &tcp_sockets_allocated,
        .orphan_count           = &tcp_orphan_count,
        .memory_allocated       = &tcp_memory_allocated,
        .memory_pressure        = &tcp_memory_pressure,
        .sysctl_mem             = sysctl_tcp_mem,
        .sysctl_wmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_wmem),
        .sysctl_rmem_offset     = offsetof(struct net, ipv4.sysctl_tcp_rmem),
        .max_header             = MAX_TCP_HEADER,
        .obj_size               = sizeof(struct tcp_sock),
        .slab_flags             = SLAB_TYPESAFE_BY_RCU,
        .twsk_prot              = &tcp_timewait_sock_ops,
        .rsk_prot               = &tcp_request_sock_ops,
        .h.hashinfo             = &tcp_hashinfo,
        .no_autobind            = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt      = compat_tcp_setsockopt,
        .compat_getsockopt      = compat_tcp_getsockopt,
#endif
        .diag_destroy           = tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
        int cpu;

        if (net->ipv4.tcp_congestion_control)
                module_put(net->ipv4.tcp_congestion_control->owner);

        for_each_possible_cpu(cpu)
                inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
        free_percpu(net->ipv4.tcp_sk);
}

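/* Per-namespace TCP setup: create one control socket per possible CPU
 * (used to send RSTs and ACKs without a full socket context) and set
 * the namespace's TCP sysctl defaults.
 */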
static int __net_init tcp_sk_init(struct net *net)
{
        int res, cpu, cnt;

        net->ipv4.tcp_sk = alloc_percpu(struct sock *);
        if (!net->ipv4.tcp_sk)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct sock *sk;

                res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
                                           IPPROTO_TCP, net);
                if (res)
                        goto fail;
                sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

                /* Enforce IP_DF and IPID==0 on RSTs and ACKs sent in
                 * SYN-RECV and TIME-WAIT states.
                 */
                inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

                *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
        }

        net->ipv4.sysctl_tcp_ecn = 2;
        net->ipv4.sysctl_tcp_ecn_fallback = 1;

        net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
        net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
        net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

        net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
        net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
        net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

        net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
        net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
        net->ipv4.sysctl_tcp_syncookies = 1;
        net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
        net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
        net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
        net->ipv4.sysctl_tcp_orphan_retries = 0;
        net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
        net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
        net->ipv4.sysctl_tcp_tw_reuse = 2;

        cnt = tcp_hashinfo.ehash_mask + 1;
        net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

        net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
        net->ipv4.sysctl_tcp_sack = 1;
        net->ipv4.sysctl_tcp_window_scaling = 1;
        net->ipv4.sysctl_tcp_timestamps = 1;
        net->ipv4.sysctl_tcp_early_retrans = 3;
        net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
        net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC 2861 behavior. */
        net->ipv4.sysctl_tcp_retrans_collapse = 1;
        net->ipv4.sysctl_tcp_max_reordering = 300;
        net->ipv4.sysctl_tcp_dsack = 1;
        net->ipv4.sysctl_tcp_app_win = 31;
        net->ipv4.sysctl_tcp_adv_win_scale = 1;
        net->ipv4.sysctl_tcp_frto = 2;
        net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
        /* This limits the percentage of the congestion window which we
         * will allow a single TSO frame to consume.  Building TSO frames
         * which are too large can cause TCP streams to be bursty.
         */
        net->ipv4.sysctl_tcp_tso_win_divisor = 3;
        /* Default TSQ limit of 16 TSO segments */
        net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
        /* RFC 5961 challenge ACK rate limiting */
        net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
        net->ipv4.sysctl_tcp_min_tso_segs = 2;
        net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
        net->ipv4.sysctl_tcp_autocorking = 1;
        net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
        net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
        net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
        if (net != &init_net) {
                memcpy(net->ipv4.sysctl_tcp_rmem,
                       init_net.ipv4.sysctl_tcp_rmem,
                       sizeof(init_net.ipv4.sysctl_tcp_rmem));
                memcpy(net->ipv4.sysctl_tcp_wmem,
                       init_net.ipv4.sysctl_tcp_wmem,
                       sizeof(init_net.ipv4.sysctl_tcp_wmem));
        }
        net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
        net->ipv4.sysctl_tcp_comp_sack_nr = 44;
        net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
        spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
        net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
        atomic_set(&net->ipv4.tfo_active_disable_times, 0);

        /* Reno is always built in */
        if (!net_eq(net, &init_net) &&
            try_module_get(init_net.ipv4.tcp_congestion_control->owner))
                net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
        else
                net->ipv4.tcp_congestion_control = &tcp_reno;

        return 0;
fail:
        tcp_sk_exit(net);

        return res;
}

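/* Batched namespace teardown: purge timewait sockets belonging to the
 * exiting namespaces in one pass, then destroy each namespace's TCP
 * fastopen context.
 */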
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
        struct net *net;

        inet_twsk_purge(&tcp_hashinfo, AF_INET);

        list_for_each_entry(net, net_exit_list, exit_list)
                tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
        .init       = tcp_sk_init,
        .exit       = tcp_sk_exit,
        .exit_batch = tcp_sk_exit_batch,
};

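/* Called once at boot (from inet_init()); failure is fatal because
 * TCP cannot operate without its per-netns control sockets.
 */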
void __init tcp_v4_init(void)
{
        if (register_pernet_subsys(&tcp_sk_ops))
                panic("Failed to create the TCP control socket.\n");
}