// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 */

/*
 * Changes:
 *	David S. Miller :	New socket lookup architecture.
 *				This code is dedicated to John Dyson.
 *	David S. Miller :	Change semantics of established hash,
 *				half is devoted to TIME_WAIT sockets
 *				and the rest go in the other half.
 *	Andi Kleen :		Add support for syncookies and fixed
 *				some bugs: ip options weren't passed to
 *				the TCP layer, missed a check for an
 *				ACK bit.
 *	Andi Kleen :		Implemented fast path mtu discovery.
 *				Fixed many serious bugs in the
 *				request_sock handling and moved
 *				most of it into the af independent code.
 *				Added tail drop and some other bugfixes.
 *				Added new listen semantics.
 *	Mike McLagan :		Routing by source
 *	Juan Jose Ciarlante :	ip_dynaddr bits
 *	Andi Kleen :		various fixes.
 *	Vitaly E. Lavrov :	Transparent proxy revived after year
 *				coma.
 *	Andi Kleen :		Fix new listen.
 *	Andi Kleen :		Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option,
 *	Alexey Kuznetsov		which allows both IPv4 and IPv6 sockets
 *					to bind a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
        return secure_tcp_seq(ip_hdr(skb)->daddr,
                              ip_hdr(skb)->saddr,
                              tcp_hdr(skb)->dest,
                              tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
        return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

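/* Decide whether a TIME-WAIT bucket may be recycled for a new outgoing
 * connection to the same (address, port) pair; returning 1 tells the
 * caller the port pair can be reused.  With sysctl_tcp_tw_reuse == 2,
 * reuse is additionally restricted to loopback traffic, as checked
 * below.
 */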
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct inet_timewait_sock *tw = inet_twsk(sktw);
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);
        int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

        if (reuse == 2) {
                /* Still does not detect *everything* that goes through
                 * lo, since we require a loopback src or dst address
                 * or direct binding to 'lo' interface.
                 */
                bool loopback = false;
                if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
                        loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
                        if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
                            (ipv6_addr_v4mapped(&tw->tw_v6_daddr) &&
                             (tw->tw_v6_daddr.s6_addr[12] == 127)) ||
                            ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
                            (ipv6_addr_v4mapped(&tw->tw_v6_rcv_saddr) &&
                             (tw->tw_v6_rcv_saddr.s6_addr[12] == 127)))
                                loopback = true;
                } else
#endif
                {
                        if (ipv4_is_loopback(tw->tw_daddr) ||
                            ipv4_is_loopback(tw->tw_rcv_saddr))
                                loopback = true;
                }
                if (!loopback)
                        reuse = 0;
        }

        /* With PAWS, it is safe from the viewpoint
         * of data integrity. Even without PAWS it is safe provided sequence
         * spaces do not overlap i.e. at data rates <= 80Mbit/sec.
         *
         * Actually, the idea is close to VJ's one, only timestamp cache is
         * held not per host, but per port pair and TW bucket is used as state
         * holder.
         *
         * If TW bucket has been already destroyed we fall back to VJ's scheme
         * and use initial timestamp retrieved from peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (reuse && time_after32(ktime_get_seconds(),
                                            tcptw->tw_ts_recent_stamp)))) {
                /* In case of repair and re-using TIME-WAIT sockets we still
                 * want to be sure that it is safe as above but honor the
                 * sequence numbers and time stamps set as part of the repair
                 * process.
                 *
                 * Without this check re-using a TIME-WAIT socket with TCP
                 * repair would accumulate a -1 on the repair assigned
                 * sequence number. The first time it is reused the sequence
                 * is -1, the second time -2, etc. This fixes that issue
                 * without appearing to create any others.
                 */
                if (likely(!tp->repair)) {
                        tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
                        if (tp->write_seq == 0)
                                tp->write_seq = 1;
                        tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                        tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                }
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                              int addr_len)
{
        /* This check is replicated from tcp_v4_connect() and intended to
         * prevent the BPF program called below from accessing bytes that are
         * out of the bound specified by the user in addr_len.
         */
        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        sock_owned_by_me(sk);

        return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

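/* Roughly, tcp_v4_connect() below proceeds as follows (a summary of the
 * code, not a normative description): resolve the route (honouring any
 * source-routing option), move the socket to SYN-SENT, pick a source
 * port via inet_hash_connect(), rebuild the route with the final ports,
 * choose the initial sequence number and timestamp offset, and finally
 * send the SYN via tcp_connect().
 */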
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __be16 orig_sport, orig_dport;
        __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             lockdep_sock_is_held(sk));
        if (inet_opt && inet_opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet_opt->opt.faddr;
        }

        orig_sport = inet->inet_sport;
        orig_dport = usin->sin_port;
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_TCP,
                              orig_sport, orig_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet_opt || !inet_opt->opt.srr)
                daddr = fl4->daddr;

        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
        sk_rcv_saddr_set(sk, inet->inet_saddr);

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
                        tp->write_seq      = 0;
        }

        inet->inet_dport = usin->sin_port;
        sk_daddr_set(sk, daddr);

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the socket
         * lock, select a source port, enter ourselves into the hash tables
         * and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(tcp_death_row, sk);
        if (err)
                goto failure;

        sk_set_txhash(sk);

        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
        /* OK, now commit destination to socket.  */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
        rt = NULL;

        if (likely(!tp->repair)) {
                if (!tp->write_seq)
                        tp->write_seq = secure_tcp_seq(inet->inet_saddr,
                                                       inet->inet_daddr,
                                                       inet->inet_sport,
                                                       usin->sin_port);
                tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
                                                 inet->inet_saddr,
                                                 inet->inet_daddr);
        }

        inet->inet_id = tp->write_seq ^ jiffies;

        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
        if (err)
                goto failure;

        err = tcp_connect(sk);

        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct dst_entry *dst;
        u32 mtu;

        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;
        mtu = tcp_sk(sk)->mtu_info;
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;

        /* Something is about to be wrong... Remember the soft error
         * in case this connection will not be able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            ip_sk_accept_pmtu(sk) &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst)
                dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);

        /* ICMPs are not backlogged, hence we cannot get
         * an established socket here.
         */
        if (seq != tcp_rsk(req)->snt_isn) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                tcp_listendrop(req->rsk_listener);
        }
        reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

int tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
        struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
        struct inet_connection_sock *icsk;
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(icmp_skb)->type;
        const int code = icmp_hdr(icmp_skb)->code;
        struct sock *sk;
        struct sk_buff *skb;
        struct request_sock *fastopen;
        u32 seq, snd_una;
        s32 remaining;
        u32 delta_us;
        int err;
        struct net *net = dev_net(icmp_skb->dev);

        sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
                                       th->dest, iph->saddr, ntohs(th->source),
                                       inet_iif(icmp_skb), 0);
        if (!sk) {
                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return -ENOENT;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return 0;
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV) {
                tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
                                     type == ICMP_TIME_EXCEEDED ||
                                     (type == ICMP_DEST_UNREACH &&
                                      (code == ICMP_NET_UNREACH ||
                                       code == ICMP_HOST_UNREACH)));
                return 0;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         * We do take care of PMTU discovery (RFC1191) special case :
         * we can receive locally generated ICMP messages while socket is held.
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
                        __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
                __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                goto out;
        }

        icsk = inet_csk(sk);
        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
        fastopen = tp->fastopen_rsk;
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_REDIRECT:
                if (!sock_owned_by_user(sk))
                        do_redirect(icmp_skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
                        /* We are not interested in TCP_LISTEN and open_requests
                         * (SYN-ACKs sent out by Linux are always <576bytes so
                         * they should go through unfragmented).
                         */
                        if (sk->sk_state == TCP_LISTEN)
                                goto out;

                        tp->mtu_info = info;
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
                        } else {
                                if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
                                        sock_hold(sk);
                        }
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if icmp_skb allows revert of backoff
                 * (see draft-zimmermann-tcp-lcd) */
                if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
                        break;
                if (seq != tp->snd_una || !icsk->icsk_retransmits ||
                    !icsk->icsk_backoff || fastopen)
                        break;

                if (sock_owned_by_user(sk))
                        break;

                skb = tcp_rtx_queue_head(sk);
                if (WARN_ON_ONCE(!skb))
                        break;

                icsk->icsk_backoff--;
                icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
                                               TCP_TIMEOUT_INIT;
                icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

                tcp_mstamp_refresh(tp);
                delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
                remaining = icsk->icsk_rto -
                            usecs_to_jiffies(delta_us);

                if (remaining > 0) {
                        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                                  remaining, TCP_RTO_MAX);
                } else {
                        /* RTO revert clocked out retransmission.
                         * Will retransmit now */
                        tcp_retransmit_timer(sk);
                }

                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket
                 * is already accepted it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk->sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * rfc1122 4.2.3.9 allows to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note, that in modern internet, where routing is unreliable
         * and in each dark corner broken firewalls sit, sending random
         * errors ordered by their masters even these two messages finally lose
         * their original sense (even Linux sends invalid PORT_UNREACHs)
         *
         * Now we are in compliance with RFCs.
         * --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk->sk_error_report(sk);
        } else { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
        return 0;
}

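/* Prime the TCP checksum for transmit.  Only the pseudo-header sum is
 * filled in here; csum_start/csum_offset tell the device (or the
 * software fallback) where to complete the checksum over the payload.
 */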
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 * for reset.
 * Answer: if a packet caused RST, it is not for a socket
 * existing in our system, if it is matched to a socket,
 * it is just duplicate segment or bug in other side's TCP.
 * So we build the reply based only on parameters
 * arriving with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
                __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key = NULL;
        const __u8 *hash_location = NULL;
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        struct net *net;
        struct sock *ctl_sk;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        /* If sk not NULL, it means we did a successful lookup and incoming
         * route had to be correct. prequeue might have dropped our dst.
         */
        if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

        net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
        rcu_read_lock();
        hash_location = tcp_parse_md5sig_option(th);
        if (sk && sk_fullsock(sk)) {
                key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
                                        &ip_hdr(skb)->saddr, AF_INET);
        } else if (hash_location) {
                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the md5 key through
                 * the listening socket. We are not losing security here:
                 * the incoming packet is checked with the md5 hash of the
                 * key we found; no RST is generated if the md5 hash doesn't
                 * match.
                 */
                sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
                                             ip_hdr(skb)->saddr,
                                             th->source, ip_hdr(skb)->daddr,
                                             ntohs(th->source), inet_iif(skb),
                                             tcp_v4_sdif(skb));
                /* don't send rst if it can't find key */
                if (!sk1)
                        goto out;

                key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
                                        &ip_hdr(skb)->saddr, AF_INET);
                if (!key)
                        goto out;

                genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto out;
        }

        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

        /* When socket is gone, all binding information is lost.
         * routing might fail in this case. No choice here, if we choose to force
         * input interface, we will misroute in case of asymmetric route.
         */
        if (sk) {
                arg.bound_dev_if = sk->sk_bound_dev_if;
                if (sk_fullsock(sk))
                        trace_tcp_send_reset(sk, skb);
        }

        BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
                     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

        arg.tos = ip_hdr(skb)->tos;
        arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
        local_bh_disable();
        ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
        if (sk)
                ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
                                  inet_twsk(sk)->tw_mark : sk->sk_mark;
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        ctl_sk->sk_mark = 0;
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
        __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
        local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
        rcu_read_unlock();
#endif
}

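/* Both the RST path above and the ACK path below transmit through a
 * per-cpu control socket (net->ipv4.tcp_sk) via ip_send_unicast_reply(),
 * rather than through the socket the segment was matched to, which may
 * not exist or may only be a minimal timewait/request socket.
 */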
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
                            struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                          ];
        } rep;
        struct net *net = sock_net(sk);
        struct ip_reply_arg arg;
        struct sock *ctl_sk;

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (tsecr) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tsval);
                rep.opt[2] = htonl(tsecr);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (tsecr) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len/4;

                tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
        arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
        local_bh_disable();
        ctl_sk = *this_cpu_ptr(net->ipv4.tcp_sk);
        if (sk)
                ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
                                  inet_twsk(sk)->tw_mark : sk->sk_mark;
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len);

        ctl_sk->sk_mark = 0;
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
        local_bh_enable();
}

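/* ACK a segment that matched a TIME-WAIT socket, using the sequence,
 * window and timestamp state preserved in the timewait bucket.
 */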
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(sk, skb,
                        tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp_raw() + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        tw->tw_tos
                        );

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
                                                 tcp_sk(sk)->snd_nxt;

        /* RFC 7323 2.3
         * The window field (SEG.WND) of every outgoing segment, with the
         * exception of <SYN> segments, MUST be right-shifted by
         * Rcv.Wind.Shift bits:
         */
        tcp_v4_send_ack(sk, skb, seq,
                        tcp_rsk(req)->rcv_nxt,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
                                          AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
}

/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              enum tcp_synack_type synack_type)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
        int err = -1;
        struct sk_buff *skb;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, foc, synack_type);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

                rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            rcu_dereference(ireq->ireq_opt));
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }

        return err;
}

/*
 * IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

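/* tcp_md5_needed is a static key (jump label): the tcp_md5_do_lookup()
 * wrapper only calls __tcp_md5_do_lookup() below once a first MD5 key
 * has been installed, keeping the common no-MD5 case cheap.
 */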
/* Find the Key structure for an address.  */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
                                           const union tcp_md5_addr *addr,
                                           int family)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        const struct tcp_md5sig_info *md5sig;
        __be32 mask;
        struct tcp_md5sig_key *best_match = NULL;
        bool match;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       lockdep_sock_is_held(sk));
        if (!md5sig)
                return NULL;

        hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                if (key->family != family)
                        continue;

                if (family == AF_INET) {
                        mask = inet_make_mask(key->prefixlen);
                        match = (key->addr.a4.s_addr & mask) ==
                                (addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
                } else if (family == AF_INET6) {
                        match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
                                                  key->prefixlen);
#endif
                } else {
                        match = false;
                }

                if (match && (!best_match ||
                              key->prefixlen > best_match->prefixlen))
                        best_match = key;
        }
        return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);

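/* Exact-match variant of the lookup above: address, family and prefix
 * length must all match.  Used by the add/del paths below so that keys
 * for overlapping prefixes remain distinct entries.
 */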
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                                      const union tcp_md5_addr *addr,
                                                      int family, u8 prefixlen)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        unsigned int size = sizeof(struct in_addr);
        const struct tcp_md5sig_info *md5sig;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       lockdep_sock_is_held(sk));
        if (!md5sig)
                return NULL;
#if IS_ENABLED(CONFIG_IPV6)
        if (family == AF_INET6)
                size = sizeof(struct in6_addr);
#endif
        hlist_for_each_entry_rcu(key, &md5sig->head, node) {
                if (key->family != family)
                        continue;
                if (!memcmp(&key->addr, addr, size) &&
                    key->prefixlen == prefixlen)
                        return key;
        }
        return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
                                         const struct sock *addr_sk)
{
        const union tcp_md5_addr *addr;

        addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
        return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
                   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
                   gfp_t gfp)
{
        /* Add Key to the list */
        struct tcp_md5sig_key *key;
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_info *md5sig;

        key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
        if (key) {
                /* Pre-existing entry - just update that one. */
                memcpy(key->key, newkey, newkeylen);
                key->keylen = newkeylen;
                return 0;
        }

        md5sig = rcu_dereference_protected(tp->md5sig_info,
                                           lockdep_sock_is_held(sk));
        if (!md5sig) {
                md5sig = kmalloc(sizeof(*md5sig), gfp);
                if (!md5sig)
                        return -ENOMEM;

                sk_nocaps_add(sk, NETIF_F_GSO_MASK);
                INIT_HLIST_HEAD(&md5sig->head);
                rcu_assign_pointer(tp->md5sig_info, md5sig);
        }

        key = sock_kmalloc(sk, sizeof(*key), gfp);
        if (!key)
                return -ENOMEM;
        if (!tcp_alloc_md5sig_pool()) {
                sock_kfree_s(sk, key, sizeof(*key));
                return -ENOMEM;
        }

        memcpy(key->key, newkey, newkeylen);
        key->keylen = newkeylen;
        key->family = family;
        key->prefixlen = prefixlen;
        memcpy(&key->addr, addr,
               (family == AF_INET6) ? sizeof(struct in6_addr) :
                                      sizeof(struct in_addr));
        hlist_add_head_rcu(&key->node, &md5sig->head);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
                   u8 prefixlen)
{
        struct tcp_md5sig_key *key;

        key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
        if (!key)
                return -ENOENT;
        hlist_del_rcu(&key->node);
        atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
        kfree_rcu(key, rcu);
        return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

stephen hemmingere0683e702012-10-26 14:31:40 +00001120static void tcp_clear_md5_list(struct sock *sk)
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001121{
1122 struct tcp_sock *tp = tcp_sk(sk);
1123 struct tcp_md5sig_key *key;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001124 struct hlist_node *n;
Eric Dumazeta8afca02012-01-31 18:45:40 +00001125 struct tcp_md5sig_info *md5sig;
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001126
Eric Dumazeta8afca02012-01-31 18:45:40 +00001127 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1128
Sasha Levinb67bfe02013-02-27 17:06:00 -08001129 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001130 hlist_del_rcu(&key->node);
Eric Dumazet5f3d9cb2012-01-31 10:56:48 +00001131 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001132 kfree_rcu(key, rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001133 }
1134}
1135
Ivan Delalande8917a772017-06-15 18:07:07 -07001136static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
1137 char __user *optval, int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001138{
1139 struct tcp_md5sig cmd;
1140 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
Ivan Delalande8917a772017-06-15 18:07:07 -07001141 u8 prefixlen = 32;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001142
1143 if (optlen < sizeof(cmd))
1144 return -EINVAL;
1145
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02001146 if (copy_from_user(&cmd, optval, sizeof(cmd)))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001147 return -EFAULT;
1148
1149 if (sin->sin_family != AF_INET)
1150 return -EINVAL;
1151
Ivan Delalande8917a772017-06-15 18:07:07 -07001152 if (optname == TCP_MD5SIG_EXT &&
1153 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1154 prefixlen = cmd.tcpm_prefixlen;
1155 if (prefixlen > 32)
1156 return -EINVAL;
1157 }
1158
Dmitry Popov64a124e2014-08-03 22:45:19 +04001159 if (!cmd.tcpm_keylen)
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001160 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001161 AF_INET, prefixlen);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001162
1163 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1164 return -EINVAL;
1165
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001166 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
Ivan Delalande8917a772017-06-15 18:07:07 -07001167 AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001168 GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001169}
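/*
 * Userspace view, as a minimal sketch (uapi names from <linux/tcp.h>; the
 * socket "fd" and the "secret" buffer are placeholders, and error handling
 * is omitted). A zero tcpm_keylen deletes a key; TCP_MD5SIG_EXT together
 * with TCP_MD5SIG_FLAG_PREFIX scopes one key to an entire IPv4 prefix:
 *
 *	struct tcp_md5sig md5 = {};
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.0", &sin->sin_addr);
 *	md5.tcpm_flags = TCP_MD5SIG_FLAG_PREFIX;
 *	md5.tcpm_prefixlen = 24;
 *	md5.tcpm_keylen = 16;
 *	memcpy(md5.tcpm_key, secret, 16);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5));
 */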
1170
Eric Dumazet19689e32016-06-27 18:51:53 +02001171static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1172 __be32 daddr, __be32 saddr,
1173 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001174{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001175 struct tcp4_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -07001176 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +02001177 struct tcphdr *_th;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001178
Eric Dumazet19689e32016-06-27 18:51:53 +02001179 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001180 bp->saddr = saddr;
1181 bp->daddr = daddr;
1182 bp->pad = 0;
YOSHIFUJI Hideaki076fb722008-04-17 12:48:12 +09001183 bp->protocol = IPPROTO_TCP;
Adam Langley49a72df2008-07-19 00:01:42 -07001184 bp->len = cpu_to_be16(nbytes);
David S. Millerc7da57a2007-10-26 00:41:21 -07001185
Eric Dumazet19689e32016-06-27 18:51:53 +02001186 _th = (struct tcphdr *)(bp + 1);
1187 memcpy(_th, th, sizeof(*th));
1188 _th->check = 0;
1189
1190 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1191 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1192 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +08001193 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -07001194}
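/*
 * What tcp_v4_md5_hash_headers() feeds the digest, per RFC 2385: first the
 * pseudo header (the layout of tcp4_pseudohdr from net/tcp.h, reproduced
 * here only for illustration), then the TCP header with its checksum
 * zeroed - hence the _th scratch copy above. Callers follow up with the
 * payload and finally the key itself via tcp_md5_hash_key():
 *
 *	struct tcp4_pseudohdr {
 *		__be32	saddr;
 *		__be32	daddr;
 *		__u8	pad;		// always 0
 *		__u8	protocol;	// IPPROTO_TCP
 *		__be16	len;		// TCP segment length
 *	};
 */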
1195
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001196static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001197 __be32 daddr, __be32 saddr, const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -07001198{
1199 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001200 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -07001201
1202 hp = tcp_get_md5sig_pool();
1203 if (!hp)
1204 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001205 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001206
Herbert Xucf80e0e2016-01-24 21:20:23 +08001207 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001208 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +02001209 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -07001210 goto clear_hash;
1211 if (tcp_md5_hash_key(hp, key))
1212 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001213 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1214 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001215 goto clear_hash;
1216
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001217 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001218 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -07001219
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001220clear_hash:
1221 tcp_put_md5sig_pool();
1222clear_hash_noput:
1223 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -07001224 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001225}
1226
Eric Dumazet39f8e582015-03-24 15:58:55 -07001227int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1228 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001229 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001230{
Adam Langley49a72df2008-07-19 00:01:42 -07001231 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001232 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001233 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001234 __be32 saddr, daddr;
1235
Eric Dumazet39f8e582015-03-24 15:58:55 -07001236 if (sk) { /* valid for establish/request sockets */
1237 saddr = sk->sk_rcv_saddr;
1238 daddr = sk->sk_daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001239 } else {
Adam Langley49a72df2008-07-19 00:01:42 -07001240 const struct iphdr *iph = ip_hdr(skb);
1241 saddr = iph->saddr;
1242 daddr = iph->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001243 }
Adam Langley49a72df2008-07-19 00:01:42 -07001244
1245 hp = tcp_get_md5sig_pool();
1246 if (!hp)
1247 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001248 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001249
Herbert Xucf80e0e2016-01-24 21:20:23 +08001250 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001251 goto clear_hash;
1252
Eric Dumazet19689e32016-06-27 18:51:53 +02001253 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -07001254 goto clear_hash;
1255 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1256 goto clear_hash;
1257 if (tcp_md5_hash_key(hp, key))
1258 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001259 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1260 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001261 goto clear_hash;
1262
1263 tcp_put_md5sig_pool();
1264 return 0;
1265
1266clear_hash:
1267 tcp_put_md5sig_pool();
1268clear_hash_noput:
1269 memset(md5_hash, 0, 16);
1270 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001271}
Adam Langley49a72df2008-07-19 00:01:42 -07001272EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001273
Eric Dumazetba8e2752015-10-02 11:43:28 -07001274#endif
1275
Eric Dumazetff74e232015-03-24 15:58:54 -07001276/* Called with rcu_read_lock() */
Eric Dumazetba8e2752015-10-02 11:43:28 -07001277static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
Eric Dumazetff74e232015-03-24 15:58:54 -07001278 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001279{
Eric Dumazetba8e2752015-10-02 11:43:28 -07001280#ifdef CONFIG_TCP_MD5SIG
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001281 /*
1282 * This gets called for each TCP segment that arrives
1283 * so we want to be efficient.
1284 * We have 3 drop cases:
1285 * o No MD5 hash and one expected.
1286 * o MD5 hash and we're not expecting one.
1287	 * o MD5 hash and it's wrong.
1288 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001289 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001290 struct tcp_md5sig_key *hash_expected;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001291 const struct iphdr *iph = ip_hdr(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001292 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001293 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001294 unsigned char newhash[16];
1295
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001296 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1297 AF_INET);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +09001298 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001299
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001300 /* We've parsed the options - do we have a hash? */
1301 if (!hash_expected && !hash_location)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001302 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001303
1304 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001305 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001306 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001307 }
1308
1309 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001310 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001311 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001312 }
1313
1314	/* Okay, so we have both hash_expected and hash_location -
1315	 * now calculate the MD5 hash and compare.
1316 */
Adam Langley49a72df2008-07-19 00:01:42 -07001317 genhash = tcp_v4_md5_hash_skb(newhash,
1318 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -07001319 NULL, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001320
1321 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -07001322 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
Joe Perchese87cc472012-05-13 21:56:26 +00001323 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1324 &iph->saddr, ntohs(th->source),
1325 &iph->daddr, ntohs(th->dest),
1326 genhash ? " tcp_v4_calc_md5_hash failed"
1327 : "");
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001328 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001329 }
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001330 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001331#endif
Eric Dumazetba8e2752015-10-02 11:43:28 -07001332 return false;
1333}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001334
Eric Dumazetb40cf182015-09-25 07:39:08 -07001335static void tcp_v4_init_req(struct request_sock *req,
1336 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001337 struct sk_buff *skb)
1338{
1339 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001340 struct net *net = sock_net(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +03001341
Eric Dumazet08d2cc3b2015-03-18 14:05:38 -07001342 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1343 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001344 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
Octavian Purdila16bea702014-06-25 17:09:53 +03001345}
1346
Eric Dumazetf9646292015-09-29 07:42:50 -07001347static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1348 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001349 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +03001350{
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001351 return inet_csk_route_req(sk, &fl->u.ip4, req);
Octavian Purdilad94e0412014-06-25 17:09:55 +03001352}
1353
Eric Dumazet72a3eff2006-11-16 02:30:37 -08001354struct request_sock_ops tcp_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355 .family = PF_INET,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001356 .obj_size = sizeof(struct tcp_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +03001357 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001358 .send_ack = tcp_v4_reqsk_send_ack,
1359 .destructor = tcp_v4_reqsk_destructor,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 .send_reset = tcp_v4_send_reset,
stephen hemminger688d1942014-08-29 23:32:05 -07001361 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362};
1363
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001364static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001365 .mss_clamp = TCP_MSS_DEFAULT,
Octavian Purdila16bea702014-06-25 17:09:53 +03001366#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001367 .req_md5_lookup = tcp_v4_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +00001368 .calc_md5_hash = tcp_v4_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -08001369#endif
Octavian Purdila16bea702014-06-25 17:09:53 +03001370 .init_req = tcp_v4_init_req,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001371#ifdef CONFIG_SYN_COOKIES
1372 .cookie_init_seq = cookie_v4_init_sequence,
1373#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +03001374 .route_req = tcp_v4_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -07001375 .init_seq = tcp_v4_init_seq,
1376 .init_ts_off = tcp_v4_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001377 .send_synack = tcp_v4_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +03001378};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001379
Linus Torvalds1da177e2005-04-16 15:20:36 -07001380int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1381{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382	/* Never answer to SYNs sent to broadcast or multicast */
Eric Dumazet511c3f92009-06-02 05:14:27 +00001383 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 goto drop;
1385
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001386 return tcp_conn_request(&tcp_request_sock_ops,
1387 &tcp_request_sock_ipv4_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001390 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 return 0;
1392}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001393EXPORT_SYMBOL(tcp_v4_conn_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394
1395
1396/*
1397 * The three-way handshake has completed - we got a valid ACK -
1398 * now create the new socket.
1399 */
Eric Dumazet0c271712015-09-29 07:42:48 -07001400struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001401 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001402 struct dst_entry *dst,
1403 struct request_sock *req_unhash,
1404 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001406 struct inet_request_sock *ireq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 struct inet_sock *newinet;
1408 struct tcp_sock *newtp;
1409 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001410#ifdef CONFIG_TCP_MD5SIG
1411 struct tcp_md5sig_key *key;
1412#endif
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001413 struct ip_options_rcu *inet_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414
1415 if (sk_acceptq_is_full(sk))
1416 goto exit_overflow;
1417
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 newsk = tcp_create_openreq_child(sk, req, skb);
1419 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001420 goto exit_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001421
Herbert Xubcd76112006-06-30 13:36:35 -07001422 newsk->sk_gso_type = SKB_GSO_TCPV4;
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001423 inet_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424
1425 newtp = tcp_sk(newsk);
1426 newinet = inet_sk(newsk);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001427 ireq = inet_rsk(req);
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001428 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1429 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
David Ahern6dd9a142015-12-16 13:20:44 -08001430 newsk->sk_bound_dev_if = ireq->ir_iif;
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001431 newinet->inet_saddr = ireq->ir_loc_addr;
1432 inet_opt = rcu_dereference(ireq->ireq_opt);
1433 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001434 newinet->mc_index = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001435 newinet->mc_ttl = ip_hdr(skb)->ttl;
Jiri Benc4c507d22012-02-09 09:35:49 +00001436 newinet->rcv_tos = ip_hdr(skb)->tos;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001437 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001438 if (inet_opt)
1439 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001440 newinet->inet_id = newtp->write_seq ^ jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
Eric Dumazetdfd25ff2012-03-10 09:20:21 +00001442 if (!dst) {
1443 dst = inet_csk_route_child_sock(sk, newsk, req);
1444 if (!dst)
1445 goto put_and_exit;
1446 } else {
1447 /* syncookie case : see end of cookie_v4_check() */
1448 }
David S. Miller0e734412011-05-08 15:28:03 -07001449 sk_setup_caps(newsk, dst);
1450
Daniel Borkmann81164412015-01-05 23:57:48 +01001451 tcp_ca_openreq_child(newsk, dst);
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001454 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07001455
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 tcp_initialize_rcv_mss(newsk);
1457
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001458#ifdef CONFIG_TCP_MD5SIG
1459 /* Copy over the MD5 key from the original socket */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001460 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1461 AF_INET);
Ian Morris00db4122015-04-03 09:17:27 +01001462 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001463 /*
1464 * We're using one, so create a matching key
1465 * on the newsk structure. If we fail to get
1466 * memory, then we end up not copying the key
1467 * across. Shucks.
1468 */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001469 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001470 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
Eric Dumazeta4654192010-05-16 00:36:33 -07001471 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001472 }
1473#endif
1474
David S. Miller0e734412011-05-08 15:28:03 -07001475 if (__inet_inherit_port(sk, newsk) < 0)
1476 goto put_and_exit;
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001477 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001478 if (likely(*own_req)) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001479 tcp_move_syn(newtp, req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001480 ireq->ireq_opt = NULL;
1481 } else {
1482 newinet->inet_opt = NULL;
1483 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 return newsk;
1485
1486exit_overflow:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001487 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001488exit_nonewsk:
1489 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001490exit:
Eric Dumazet9caad862016-04-01 08:52:20 -07001491 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 return NULL;
David S. Miller0e734412011-05-08 15:28:03 -07001493put_and_exit:
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001494 newinet->inet_opt = NULL;
Christoph Paasche337e242012-12-14 04:07:58 +00001495 inet_csk_prepare_forced_close(newsk);
1496 tcp_done(newsk);
David S. Miller0e734412011-05-08 15:28:03 -07001497 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001499EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500
Eric Dumazet079096f2015-10-02 11:43:32 -07001501static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001504 const struct tcphdr *th = tcp_hdr(skb);
1505
Florian Westphalaf9b4732010-06-03 00:43:44 +00001506 if (!th->syn)
Cong Wang461b74c2014-10-15 14:33:22 -07001507 sk = cookie_v4_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508#endif
1509 return sk;
1510}
1511
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001513 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 *
1515 * We have a potential double-lock case here, so even when
1516 * doing backlog processing we use the BH locking scheme.
1517 * This is because we cannot sleep with the original spinlock
1518 * held.
1519 */
1520int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1521{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001522 struct sock *rsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001523
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet404e0a82012-07-29 23:20:37 +00001525 struct dst_entry *dst = sk->sk_rx_dst;
1526
Tom Herbertbdeab992011-08-14 19:45:55 +00001527 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001528 sk_mark_napi_id(sk, skb);
Eric Dumazet404e0a82012-07-29 23:20:37 +00001529 if (dst) {
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001530 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
Ian Morris51456b22015-04-03 09:17:26 +01001531 !dst->ops->check(dst, 0)) {
David S. Miller92101b32012-07-23 16:29:00 -07001532 dst_release(dst);
1533 sk->sk_rx_dst = NULL;
1534 }
1535 }
Yafang Shao3d97d882018-05-29 23:27:31 +08001536 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 return 0;
1538 }
1539
Eric Dumazet12e25e12015-06-03 23:49:21 -07001540 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 goto csum_err;
1542
1543 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001544 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1545
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 if (!nsk)
1547 goto discard;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548 if (nsk != sk) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001549 if (tcp_child_process(sk, nsk, skb)) {
1550 rsk = nsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001553 return 0;
1554 }
Eric Dumazetca551582010-06-03 09:03:58 +00001555 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001556 sock_rps_save_rxhash(sk, skb);
Eric Dumazetca551582010-06-03 09:03:58 +00001557
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001558 if (tcp_rcv_state_process(sk, skb)) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001559 rsk = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001561 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 return 0;
1563
1564reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001565 tcp_v4_send_reset(rsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566discard:
1567 kfree_skb(skb);
1568 /* Be careful here. If this function gets more complicated and
1569 * gcc suffers from register pressure on the x86, sk (in %ebx)
1570 * might be destroyed here. This current version compiles correctly,
1571 * but you have been warned.
1572 */
1573 return 0;
1574
1575csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001576 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1577 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 goto discard;
1579}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001580EXPORT_SYMBOL(tcp_v4_do_rcv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581
Paolo Abeni74874492017-09-28 15:51:36 +02001582int tcp_v4_early_demux(struct sk_buff *skb)
David S. Miller41063e92012-06-19 21:22:05 -07001583{
David S. Miller41063e92012-06-19 21:22:05 -07001584 const struct iphdr *iph;
1585 const struct tcphdr *th;
1586 struct sock *sk;
David S. Miller41063e92012-06-19 21:22:05 -07001587
David S. Miller41063e92012-06-19 21:22:05 -07001588 if (skb->pkt_type != PACKET_HOST)
Paolo Abeni74874492017-09-28 15:51:36 +02001589 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001590
Eric Dumazet45f00f92012-10-22 21:42:47 +00001591 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
Paolo Abeni74874492017-09-28 15:51:36 +02001592 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001593
1594 iph = ip_hdr(skb);
Eric Dumazet45f00f92012-10-22 21:42:47 +00001595 th = tcp_hdr(skb);
David S. Miller41063e92012-06-19 21:22:05 -07001596
1597 if (th->doff < sizeof(struct tcphdr) / 4)
Paolo Abeni74874492017-09-28 15:51:36 +02001598 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001599
Eric Dumazet45f00f92012-10-22 21:42:47 +00001600 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
David S. Miller41063e92012-06-19 21:22:05 -07001601 iph->saddr, th->source,
Vijay Subramanian7011d082012-06-23 17:38:10 +00001602 iph->daddr, ntohs(th->dest),
David Ahern3fa6f612017-08-07 08:44:17 -07001603 skb->skb_iif, inet_sdif(skb));
David S. Miller41063e92012-06-19 21:22:05 -07001604 if (sk) {
1605 skb->sk = sk;
1606 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001607 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001608 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001609
David S. Miller41063e92012-06-19 21:22:05 -07001610 if (dst)
1611 dst = dst_check(dst, 0);
David S. Miller92101b32012-07-23 16:29:00 -07001612 if (dst &&
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001613 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
David S. Miller92101b32012-07-23 16:29:00 -07001614 skb_dst_set_noref(skb, dst);
David S. Miller41063e92012-06-19 21:22:05 -07001615 }
1616 }
Paolo Abeni74874492017-09-28 15:51:36 +02001617 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001618}
1619
Eric Dumazetc9c33212016-08-27 07:37:54 -07001620bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1621{
1622 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001623 struct skb_shared_info *shinfo;
1624 const struct tcphdr *th;
1625 struct tcphdr *thtail;
1626 struct sk_buff *tail;
1627 unsigned int hdrlen;
1628 bool fragstolen;
1629 u32 gso_segs;
1630 int delta;
Eric Dumazetc9c33212016-08-27 07:37:54 -07001631
1632 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1633 * we can fix skb->truesize to its real value to avoid future drops.
1634 * This is valid because skb is not yet charged to the socket.
1635	 * It has been noticed that pure SACK packets were sometimes dropped
1636 * (if cooked by drivers without copybreak feature).
1637 */
Eric Dumazet60b1af32017-01-24 14:57:36 -08001638 skb_condense(skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001639
Eric Dumazetade96282018-11-19 17:45:55 -08001640 skb_dst_drop(skb);
1641
Eric Dumazet4f693b52018-11-27 14:42:03 -08001642 if (unlikely(tcp_checksum_complete(skb))) {
1643 bh_unlock_sock(sk);
1644 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1645 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1646 return true;
1647 }
1648
1649 /* Attempt coalescing to last skb in backlog, even if we are
1650 * above the limits.
1651 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1652 */
1653 th = (const struct tcphdr *)skb->data;
1654 hdrlen = th->doff * 4;
1655 shinfo = skb_shinfo(skb);
1656
1657 if (!shinfo->gso_size)
1658 shinfo->gso_size = skb->len - hdrlen;
1659
1660 if (!shinfo->gso_segs)
1661 shinfo->gso_segs = 1;
1662
1663 tail = sk->sk_backlog.tail;
1664 if (!tail)
1665 goto no_coalesce;
1666 thtail = (struct tcphdr *)tail->data;
1667
1668 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1669 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1670 ((TCP_SKB_CB(tail)->tcp_flags |
Eric Dumazetca2fe292019-04-26 10:10:05 -07001671 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1672 !((TCP_SKB_CB(tail)->tcp_flags &
1673 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
Eric Dumazet4f693b52018-11-27 14:42:03 -08001674 ((TCP_SKB_CB(tail)->tcp_flags ^
1675 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1676#ifdef CONFIG_TLS_DEVICE
1677 tail->decrypted != skb->decrypted ||
1678#endif
1679 thtail->doff != th->doff ||
1680 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1681 goto no_coalesce;
1682
1683 __skb_pull(skb, hdrlen);
1684 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
1685 thtail->window = th->window;
1686
1687 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1688
1689 if (after(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))
1690 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
1691
Eric Dumazetca2fe292019-04-26 10:10:05 -07001692 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1693 * thtail->fin, so that the fast path in tcp_rcv_established()
1694 * is not entered if we append a packet with a FIN.
1695 * SYN, RST, URG are not present.
1696 * ACK is set on both packets.
1697 * PSH : we do not really care in TCP stack,
1698 * at least for 'GRO' packets.
1699 */
1700 thtail->fin |= th->fin;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001701 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1702
1703 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1704 TCP_SKB_CB(tail)->has_rxtstamp = true;
1705 tail->tstamp = skb->tstamp;
1706 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1707 }
1708
1709		/* Not as strict as GRO. We only need to carry the max mss value */
1710 skb_shinfo(tail)->gso_size = max(shinfo->gso_size,
1711 skb_shinfo(tail)->gso_size);
1712
1713 gso_segs = skb_shinfo(tail)->gso_segs + shinfo->gso_segs;
1714 skb_shinfo(tail)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
1715
1716 sk->sk_backlog.len += delta;
1717 __NET_INC_STATS(sock_net(sk),
1718 LINUX_MIB_TCPBACKLOGCOALESCE);
1719 kfree_skb_partial(skb, fragstolen);
1720 return false;
1721 }
1722 __skb_push(skb, hdrlen);
1723
1724no_coalesce:
1725 /* Only socket owner can try to collapse/prune rx queues
1726 * to reduce memory overhead, so add a little headroom here.
1727	 * Only a few socket backlogs are likely to be non-empty at once.
1728 */
1729 limit += 64*1024;
1730
Eric Dumazetc9c33212016-08-27 07:37:54 -07001731 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1732 bh_unlock_sock(sk);
1733 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1734 return true;
1735 }
1736 return false;
1737}
1738EXPORT_SYMBOL(tcp_add_backlog);
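/*
 * A worked example of the budget above (numbers illustrative): with
 * sk_rcvbuf and sk_sndbuf both at 256 KB, the effective limit is
 * 256K + 256K + 64K = 576 KB. A segment that coalesces into the backlog
 * tail charges only its truesize delta against that budget; one that
 * cannot coalesce is charged in full by sk_add_backlog() and, once the
 * limit is exceeded, dropped and accounted as LINUX_MIB_TCPBACKLOGDROP.
 */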
1739
Eric Dumazetac6e7802016-11-10 13:12:35 -08001740int tcp_filter(struct sock *sk, struct sk_buff *skb)
1741{
1742 struct tcphdr *th = (struct tcphdr *)skb->data;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001743
Christoph Paaschf2feaef2019-03-11 11:41:05 -07001744 return sk_filter_trim_cap(sk, skb, th->doff * 4);
Eric Dumazetac6e7802016-11-10 13:12:35 -08001745}
1746EXPORT_SYMBOL(tcp_filter);
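/*
 * Note the trim cap above: a socket filter may shorten the payload, but
 * sk_filter_trim_cap() never trims below th->doff * 4, so code that
 * re-reads the TCP header after tcp_filter() (as tcp_v4_rcv() does) can
 * rely on a complete header still being present.
 */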
1747
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001748static void tcp_v4_restore_cb(struct sk_buff *skb)
1749{
1750 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1751 sizeof(struct inet_skb_parm));
1752}
1753
1754static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1755 const struct tcphdr *th)
1756{
1757	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1758	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1759 */
1760 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1761 sizeof(struct inet_skb_parm));
1762 barrier();
1763
1764 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1765 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1766 skb->len - th->doff * 4);
1767 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1768 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1769 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1770 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1771 TCP_SKB_CB(skb)->sacked = 0;
1772 TCP_SKB_CB(skb)->has_rxtstamp =
1773 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1774}
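/*
 * Worked example of the end_seq arithmetic above (numbers illustrative):
 * a SYN with seq = 1000, doff = 5 (20-byte header) and no payload gives
 * end_seq = 1000 + 1 + 0 + (20 - 20) = 1001, i.e. the SYN consumes one
 * sequence number; a pure ACK yields end_seq == seq, and a 100-byte data
 * segment yields end_seq = seq + 100.
 */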
1775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776/*
1777 * From tcp_input.c
1778 */
1779
1780int tcp_v4_rcv(struct sk_buff *skb)
1781{
Eric Dumazet3b24d852016-04-01 08:52:17 -07001782 struct net *net = dev_net(skb->dev);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001783 struct sk_buff *skb_to_free;
David Ahern3fa6f612017-08-07 08:44:17 -07001784 int sdif = inet_sdif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001785 const struct iphdr *iph;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001786 const struct tcphdr *th;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001787 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 struct sock *sk;
1789 int ret;
1790
1791 if (skb->pkt_type != PACKET_HOST)
1792 goto discard_it;
1793
1794 /* Count it even if it's bad */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001795 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
1797 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1798 goto discard_it;
1799
Eric Dumazetea1627c2016-05-13 09:16:40 -07001800 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801
Eric Dumazetea1627c2016-05-13 09:16:40 -07001802 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803 goto bad_packet;
1804 if (!pskb_may_pull(skb, th->doff * 4))
1805 goto discard_it;
1806
1807 /* An explanation is required here, I think.
1808 * Packet length and doff are validated by header prediction,
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001809 * provided case of th->doff==0 is eliminated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 * So, we defer the checks. */
Tom Herberted70fcf2014-05-02 16:29:38 -07001811
1812 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001813 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814
Eric Dumazetea1627c2016-05-13 09:16:40 -07001815 th = (const struct tcphdr *)skb->data;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001816 iph = ip_hdr(skb);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001817lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001818 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
David Ahern3fa6f612017-08-07 08:44:17 -07001819 th->dest, sdif, &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 if (!sk)
1821 goto no_tcp_socket;
1822
Eric Dumazetbb134d52010-03-09 05:55:56 +00001823process:
1824 if (sk->sk_state == TCP_TIME_WAIT)
1825 goto do_time_wait;
1826
Eric Dumazet079096f2015-10-02 11:43:32 -07001827 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1828 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001829 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001830 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001831
1832 sk = req->rsk_listener;
Eric Dumazet72923552016-02-11 22:50:29 -08001833 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001834 sk_drops_add(sk, skb);
Eric Dumazet72923552016-02-11 22:50:29 -08001835 reqsk_put(req);
1836 goto discard_it;
1837 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001838 if (tcp_checksum_complete(skb)) {
1839 reqsk_put(req);
1840 goto csum_error;
1841 }
Eric Dumazet77166822016-02-18 05:39:18 -08001842 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001843 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001844 goto lookup;
1845 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07001846 /* We own a reference on the listener, increase it again
1847 * as we might lose it too soon.
1848 */
Eric Dumazet77166822016-02-18 05:39:18 -08001849 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001850 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001851 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001852 if (!tcp_filter(sk, skb)) {
1853 th = (const struct tcphdr *)skb->data;
1854 iph = ip_hdr(skb);
1855 tcp_v4_fill_cb(skb, iph, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001856 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001857 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001858 if (!nsk) {
1859 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001860 if (req_stolen) {
1861 /* Another cpu got exclusive access to req
1862 * and created a full blown socket.
1863 * Try to feed this packet to this socket
1864 * instead of discarding it.
1865 */
1866 tcp_v4_restore_cb(skb);
1867 sock_put(sk);
1868 goto lookup;
1869 }
Eric Dumazet77166822016-02-18 05:39:18 -08001870 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001871 }
1872 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001873 reqsk_put(req);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001874 tcp_v4_restore_cb(skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001875 } else if (tcp_child_process(sk, nsk, skb)) {
1876 tcp_v4_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001877 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001878 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001879 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001880 return 0;
1881 }
1882 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001883 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001884 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001885 goto discard_and_relse;
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001886 }
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001887
Linus Torvalds1da177e2005-04-16 15:20:36 -07001888 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1889 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001890
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001891 if (tcp_v4_inbound_md5_hash(sk, skb))
1892 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001893
Patrick McHardyb59c2702006-01-06 23:06:10 -08001894 nf_reset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
Eric Dumazetac6e7802016-11-10 13:12:35 -08001896 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001898 th = (const struct tcphdr *)skb->data;
1899 iph = ip_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001900 tcp_v4_fill_cb(skb, iph, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001901
1902 skb->dev = NULL;
1903
Eric Dumazete994b2f2015-10-02 11:43:39 -07001904 if (sk->sk_state == TCP_LISTEN) {
1905 ret = tcp_v4_do_rcv(sk, skb);
1906 goto put_and_return;
1907 }
1908
1909 sk_incoming_cpu_update(sk);
1910
Ingo Molnarc6366182006-07-03 00:25:13 -07001911 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001912 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913 ret = 0;
1914 if (!sock_owned_by_user(sk)) {
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001915 skb_to_free = sk->sk_rx_skb_cache;
1916 sk->sk_rx_skb_cache = NULL;
Florian Westphale7942d02017-07-30 03:57:18 +02001917 ret = tcp_v4_do_rcv(sk, skb);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001918 } else {
1919 if (tcp_add_backlog(sk, skb))
1920 goto discard_and_relse;
1921 skb_to_free = NULL;
Zhu Yi6b03a532010-03-04 18:01:41 +00001922 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923 bh_unlock_sock(sk);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001924 if (skb_to_free)
1925 __kfree_skb(skb_to_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926
Eric Dumazete994b2f2015-10-02 11:43:39 -07001927put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001928 if (refcounted)
1929 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930
1931 return ret;
1932
1933no_tcp_socket:
1934 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1935 goto discard_it;
1936
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001937 tcp_v4_fill_cb(skb, iph, th);
1938
Eric Dumazet12e25e12015-06-03 23:49:21 -07001939 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001940csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001941 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001943 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001945 tcp_v4_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 }
1947
1948discard_it:
1949 /* Discard frame. */
1950 kfree_skb(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001951 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952
1953discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001954 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001955 if (refcounted)
1956 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957 goto discard_it;
1958
1959do_time_wait:
1960 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001961 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 goto discard_it;
1963 }
1964
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001965 tcp_v4_fill_cb(skb, iph, th);
1966
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001967 if (tcp_checksum_complete(skb)) {
1968 inet_twsk_put(inet_twsk(sk));
1969 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 }
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001971 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 case TCP_TW_SYN: {
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001973 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
Craig Galleka5836362016-02-10 11:50:38 -05001974 &tcp_hashinfo, skb,
1975 __tcp_hdrlen(th),
Tom Herbertda5e3632013-01-22 09:50:24 +00001976 iph->saddr, th->source,
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001977 iph->daddr, th->dest,
David Ahern3fa6f612017-08-07 08:44:17 -07001978 inet_iif(skb),
1979 sdif);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 if (sk2) {
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001981 inet_twsk_deschedule_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982 sk = sk2;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001983 tcp_v4_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001984 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 goto process;
1986 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001987 }
Gustavo A. R. Silvafcfd6df2017-10-16 15:48:55 -05001988 /* to ACK */
1989 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 case TCP_TW_ACK:
1991 tcp_v4_timewait_ack(sk, skb);
1992 break;
1993 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001994 tcp_v4_send_reset(sk, skb);
1995 inet_twsk_deschedule_put(inet_twsk(sk));
1996 goto discard_it;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 case TCP_TW_SUCCESS:;
1998 }
1999 goto discard_it;
2000}
2001
David S. Millerccb7c412010-12-01 18:09:13 -08002002static struct timewait_sock_ops tcp_timewait_sock_ops = {
2003 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2004 .twsk_unique = tcp_twsk_unique,
2005 .twsk_destructor= tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08002006};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007
Eric Dumazet63d02d12012-08-09 14:11:00 +00002008void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
Eric Dumazet5d299f32012-08-06 05:09:33 +00002009{
2010 struct dst_entry *dst = skb_dst(skb);
2011
Eric Dumazet5037e9e2015-12-14 14:08:53 -08002012 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -07002013 sk->sk_rx_dst = dst;
2014 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2015 }
Eric Dumazet5d299f32012-08-06 05:09:33 +00002016}
Eric Dumazet63d02d12012-08-09 14:11:00 +00002017EXPORT_SYMBOL(inet_sk_rx_dst_set);
Eric Dumazet5d299f32012-08-06 05:09:33 +00002018
Stephen Hemminger3b401a82009-09-01 19:25:04 +00002019const struct inet_connection_sock_af_ops ipv4_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002020 .queue_xmit = ip_queue_xmit,
2021 .send_check = tcp_v4_send_check,
2022 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00002023 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002024 .conn_request = tcp_v4_conn_request,
2025 .syn_recv_sock = tcp_v4_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002026 .net_header_len = sizeof(struct iphdr),
2027 .setsockopt = ip_setsockopt,
2028 .getsockopt = ip_getsockopt,
2029 .addr2sockaddr = inet_csk_addr2sockaddr,
2030 .sockaddr_len = sizeof(struct sockaddr_in),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002031#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002032 .compat_setsockopt = compat_ip_setsockopt,
2033 .compat_getsockopt = compat_ip_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002034#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04002035 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036};
Eric Dumazet4bc2f182010-07-09 21:22:10 +00002037EXPORT_SYMBOL(ipv4_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002039#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00002040static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002041 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07002042 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002043 .md5_parse = tcp_v4_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002044};
Andrew Mortonb6332e62006-11-30 19:16:28 -08002045#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002046
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047/* NOTE: A lot of things set to zero explicitly by call to
2048 * sk_alloc() so need not be done here.
2049 */
2050static int tcp_v4_init_sock(struct sock *sk)
2051{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002052 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053
Neal Cardwell900f65d2012-04-19 09:55:21 +00002054 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08002056 icsk->icsk_af_ops = &ipv4_specific;
Neal Cardwell900f65d2012-04-19 09:55:21 +00002057
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002058#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04002059 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002060#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 return 0;
2063}
2064
Brian Haley7d06b2e2008-06-14 17:04:49 -07002065void tcp_v4_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066{
2067 struct tcp_sock *tp = tcp_sk(sk);
2068
Song Liue1a4aa52017-10-23 09:20:26 -07002069 trace_tcp_destroy_sock(sk);
2070
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 tcp_clear_xmit_timers(sk);
2072
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002073 tcp_cleanup_congestion_control(sk);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002074
Dave Watson734942c2017-06-14 11:37:14 -07002075 tcp_cleanup_ulp(sk);
2076
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077 /* Cleanup up the write buffer. */
David S. Millerfe067e82007-03-07 12:12:44 -08002078 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079
Wei Wangcf1ef3f2017-04-20 14:45:46 -07002080 /* Check if we want to disable active TFO */
2081 tcp_fastopen_active_disable_ofo_check(sk);
2082
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 /* Cleans up our, hopefully empty, out_of_order_queue. */
Yaogong Wang9f5afea2016-09-07 14:49:28 -07002084 skb_rbtree_purge(&tp->out_of_order_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002086#ifdef CONFIG_TCP_MD5SIG
2087 /* Clean up the MD5 key list, if any */
2088 if (tp->md5sig_info) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00002089 tcp_clear_md5_list(sk);
Mat Martineaufb7df5e2017-12-21 10:29:10 -08002090 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002091 tp->md5sig_info = NULL;
2092 }
2093#endif
2094
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095 /* Clean up a referenced TCP bind bucket. */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002096 if (inet_csk(sk)->icsk_bind_hash)
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08002097 inet_put_port(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Ian Morris00db4122015-04-03 09:17:27 +01002099 BUG_ON(tp->fastopen_rsk);
William Allen Simpson435cf552009-12-02 18:17:05 +00002100
Yuchung Chengcf60af02012-07-19 06:43:09 +00002101 /* If socket is aborted during connect operation */
2102 tcp_free_fastopen_req(tp);
Yuchung Cheng1fba70e2017-10-18 11:22:51 -07002103 tcp_fastopen_destroy_cipher(sk);
Eric Dumazetcd8ae852015-05-03 21:34:46 -07002104 tcp_saved_syn_free(tp);
Yuchung Chengcf60af02012-07-19 06:43:09 +00002105
Glauber Costa180d8cd2011-12-11 21:47:02 +00002106 sk_sockets_allocated_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108EXPORT_SYMBOL(tcp_v4_destroy_sock);
2109
2110#ifdef CONFIG_PROC_FS
2111/* Proc filesystem TCP sock list dumping. */
2112
Tom Herberta8b690f2010-06-07 00:43:42 -07002113/*
2114 * Get the next listener socket following cur. If cur is NULL, get the first
2115 * starting from bucket given in st->bucket; when st->bucket is zero the
2116 * very first socket in the hash table is returned.
2117 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118static void *listening_get_next(struct seq_file *seq, void *cur)
2119{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002120 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002121 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002122 struct net *net = seq_file_net(seq);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002123 struct inet_listen_hashbucket *ilb;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002124 struct sock *sk = cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
2126 if (!sk) {
Eric Dumazet3b24d852016-04-01 08:52:17 -07002127get_head:
Tom Herberta8b690f2010-06-07 00:43:42 -07002128 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Eric Dumazet9652dc22016-10-19 21:24:58 -07002129 spin_lock(&ilb->lock);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002130 sk = sk_head(&ilb->head);
Tom Herberta8b690f2010-06-07 00:43:42 -07002131 st->offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132 goto get_sk;
2133 }
Eric Dumazet5caea4e2008-11-20 00:40:07 -08002134 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002136 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
Eric Dumazet3b24d852016-04-01 08:52:17 -07002138 sk = sk_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139get_sk:
Eric Dumazet3b24d852016-04-01 08:52:17 -07002140 sk_for_each_from(sk) {
Pavel Emelyanov8475ef92010-11-22 03:26:12 +00002141 if (!net_eq(sock_net(sk), net))
2142 continue;
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002143 if (sk->sk_family == afinfo->family)
Eric Dumazet3b24d852016-04-01 08:52:17 -07002144 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145 }
Eric Dumazet9652dc22016-10-19 21:24:58 -07002146 spin_unlock(&ilb->lock);
Tom Herberta8b690f2010-06-07 00:43:42 -07002147 st->offset = 0;
Eric Dumazet3b24d852016-04-01 08:52:17 -07002148 if (++st->bucket < INET_LHTABLE_SIZE)
2149 goto get_head;
2150 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002151}
2152
2153static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2154{
Tom Herberta8b690f2010-06-07 00:43:42 -07002155 struct tcp_iter_state *st = seq->private;
2156 void *rc;
2157
2158 st->bucket = 0;
2159 st->offset = 0;
2160 rc = listening_get_next(seq, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161
2162 while (rc && *pos) {
2163 rc = listening_get_next(seq, rc);
2164 --*pos;
2165 }
2166 return rc;
2167}
2168
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002169static inline bool empty_bucket(const struct tcp_iter_state *st)
Andi Kleen6eac5602008-08-28 01:08:02 -07002170{
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002171 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
Andi Kleen6eac5602008-08-28 01:08:02 -07002172}
2173
Tom Herberta8b690f2010-06-07 00:43:42 -07002174/*
2175 * Get first established socket starting from bucket given in st->bucket.
2176 * If st->bucket is zero, the very first socket in the hash is returned.
2177 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178static void *established_get_first(struct seq_file *seq)
2179{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002180 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002181 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002182 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 void *rc = NULL;
2184
Tom Herberta8b690f2010-06-07 00:43:42 -07002185 st->offset = 0;
2186 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 struct sock *sk;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002188 struct hlist_nulls_node *node;
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002189 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
Andi Kleen6eac5602008-08-28 01:08:02 -07002191 /* Lockless fast path for the common case of empty buckets */
2192 if (empty_bucket(st))
2193 continue;
2194
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002195 spin_lock_bh(lock);
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002196 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002197 if (sk->sk_family != afinfo->family ||
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002198 !net_eq(sock_net(sk), net)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199 continue;
2200 }
2201 rc = sk;
2202 goto out;
2203 }
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002204 spin_unlock_bh(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 }
2206out:
2207 return rc;
2208}
2209
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == afinfo->family &&
		    net_eq(sock_net(sk), net))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

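/*
 * Position semantics: *pos == 0 maps to SEQ_START_TOKEN (the header line);
 * higher positions enumerate the listening hash first, then the established
 * hash. Positions are not stable - sockets can move or disappear between
 * reads as the tables change.
 */
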
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}

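/*
 * Resuming from the saved st->bucket/st->offset keeps sequential reads of
 * a large table roughly linear; without it, every read() chunk would have
 * to re-walk the hash from bucket 0 to find where the previous one stopped.
 */
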
void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

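/*
 * For reference, a plain `cat /proc/net/tcp` makes the seq_file core drive
 * these ops roughly as follows (sketch of the standard seq_file contract):
 *
 *	v = tcp_seq_start(seq, &pos);		// may resume via st->last_pos
 *	while (v && buffer space remains) {
 *		tcp4_seq_show(seq, v);		// header token or one record
 *		v = tcp_seq_next(seq, v, &pos);
 *	}
 *	tcp_seq_stop(seq, v);			// drops any held bucket lock
 */
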
void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);

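/*
 * tcp_seq_start/next/stop are exported, unlike the rest of this iterator,
 * presumably so the IPv6 side can drive the same walk for /proc/net/tcp6
 * with an AF_INET6 tcp_seq_afinfo.
 */
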
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

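/*
 * Request sockets have no queues or inode of their own, hence the fixed
 * zero columns above; the uid reported is that of the listener the request
 * belongs to (req->rsk_listener).
 */
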
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active = 1;
		timer_expires = icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active = 4;
		timer_expires = icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active = 2;
		timer_expires = sk->sk_timer.expires;
	} else {
		timer_active = 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

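/*
 * Addresses are printed as the raw %08X of the __be32 value and ports as
 * ntohs()ed hex, so on a little-endian machine 127.0.0.1:80 shows up as
 * "0100007F:0050". Illustrative line (abridged, not real output):
 *
 *	0: 0100007F:0050 00000000:0000 0A ...	(0A == TCP_LISTEN)
 */
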
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

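/*
 * seq_setwidth()/seq_pad() above fix every record at TMPSZ - 1 = 149
 * visible characters plus the trailing '\n', i.e. 150 bytes per line, so
 * readers may treat /proc/net/tcp as fixed-width records.
 */
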
static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

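/*
 * Minimal userspace sketch (illustrative only, not part of this file) of
 * consuming the interface registered above; it assumes nothing beyond the
 * header line and fixed-width records described earlier:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[256];
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *
 *		if (!f)
 *			return 1;
 *		while (fgets(line, sizeof(line), f))
 *			fputs(line, stdout);	// one 150-byte record per line
 *		fclose(f);
 *		return 0;
 *	}
 */
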
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

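/*
 * tcp_prot is the method table the inet layer dispatches SOCK_STREAM
 * operations through. Note SLAB_TYPESAFE_BY_RCU: tcp_sock slab objects may
 * be recycled while RCU readers still hold pointers to them, so lockless
 * lookups must re-validate a socket's identity after taking a reference.
 */
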
static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Enforce IP_DF and IPID==0 for RST and ACK packets
		 * sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC 2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* RFC 5961 challenge ACK rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

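/*
 * The per-cpu net->ipv4.tcp_sk sockets created above are control sockets:
 * raw, kernel-internal sockets used to emit stateless replies (e.g. RSTs
 * and TIME-WAIT ACKs) when no full socket exists to send from.
 */
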
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	     = tcp_sk_init,
	.exit	     = tcp_sk_exit,
	.exit_batch  = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}