// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol (TCP).
 *
 *              IPv4 specific functions
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      request_sock handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan :          Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after a
 *                                      year-long coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allows both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>
#include <linux/btf_ids.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
                               __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

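/* The initial sequence number for an incoming connection is derived from the
 * connection 4-tuple: secure_tcp_seq() hashes the addresses and ports with a
 * boot-time secret and adds a clock component (the RFC 6528 approach), so
 * ISNs are hard to predict off-path yet still advance per 4-tuple.
 */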
static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
        return secure_tcp_seq(ip_hdr(skb)->daddr,
                              ip_hdr(skb)->saddr,
                              tcp_hdr(skb)->dest,
                              tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
        return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}

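/* Called when a new connection collides with a TIME-WAIT socket on the same
 * 4-tuple: returns 1 (taking a reference on the TIME-WAIT socket) when reuse
 * is safe, i.e. recent timestamps let PAWS reject stray old segments and the
 * new write_seq is seeded beyond the old one.  sysctl_tcp_tw_reuse: 0
 * disables reuse, 1 enables it, 2 restricts it to loopback traffic, as
 * checked below.
 */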
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
        const struct inet_timewait_sock *tw = inet_twsk(sktw);
        const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
        struct tcp_sock *tp = tcp_sk(sk);
        int reuse = sock_net(sk)->ipv4.sysctl_tcp_tw_reuse;

        if (reuse == 2) {
                /* Still does not detect *everything* that goes through
                 * lo, since we require a loopback src or dst address
                 * or direct binding to the 'lo' interface.
                 */
                bool loopback = false;

                if (tw->tw_bound_dev_if == LOOPBACK_IFINDEX)
                        loopback = true;
#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == AF_INET6) {
                        if (ipv6_addr_loopback(&tw->tw_v6_daddr) ||
                            ipv6_addr_v4mapped_loopback(&tw->tw_v6_daddr) ||
                            ipv6_addr_loopback(&tw->tw_v6_rcv_saddr) ||
                            ipv6_addr_v4mapped_loopback(&tw->tw_v6_rcv_saddr))
                                loopback = true;
                } else
#endif
                {
                        if (ipv4_is_loopback(tw->tw_daddr) ||
                            ipv4_is_loopback(tw->tw_rcv_saddr))
                                loopback = true;
                }
                if (!loopback)
                        reuse = 0;
        }

        /* With PAWS, it is safe from the viewpoint
           of data integrity. Even without PAWS it is safe provided sequence
           spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.

           Actually, the idea is close to VJ's: the timestamp cache is held
           not per host but per port pair, and the TW bucket is used as the
           state holder.

           If the TW bucket has already been destroyed we fall back to VJ's
           scheme and use the initial timestamp retrieved from the peer table.
         */
        if (tcptw->tw_ts_recent_stamp &&
            (!twp || (reuse && time_after32(ktime_get_seconds(),
                                            tcptw->tw_ts_recent_stamp)))) {
                /* In case of repair and re-using TIME-WAIT sockets we still
                 * want to be sure that it is safe as above but honor the
                 * sequence numbers and time stamps set as part of the repair
                 * process.
                 *
                 * Without this check re-using a TIME-WAIT socket with TCP
                 * repair would accumulate a -1 on the repair assigned
                 * sequence number. The first time it is reused the sequence
                 * is -1, the second time -2, etc. This fixes that issue
                 * without appearing to create any others.
                 */
                if (likely(!tp->repair)) {
                        u32 seq = tcptw->tw_snd_nxt + 65535 + 2;

                        if (!seq)
                                seq = 1;
                        WRITE_ONCE(tp->write_seq, seq);
                        tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
                        tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                }
                sock_hold(sktw);
                return 1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);

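/* Runs via sk->sk_prot->pre_connect before tcp_v4_connect(), giving
 * BPF_CGROUP_INET4_CONNECT programs a chance to inspect or rewrite the
 * destination while the socket lock is held.
 */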
static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
                              int addr_len)
{
        /* This check is replicated from tcp_v4_connect() and intended to
         * prevent the BPF program called below from accessing bytes that are
         * outside of the bound specified by the user in addr_len.
         */
        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        sock_owned_by_me(sk);

        return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
        struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
        struct inet_sock *inet = inet_sk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        __be16 orig_sport, orig_dport;
        __be32 daddr, nexthop;
        struct flowi4 *fl4;
        struct rtable *rt;
        int err;
        struct ip_options_rcu *inet_opt;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        if (addr_len < sizeof(struct sockaddr_in))
                return -EINVAL;

        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;

        nexthop = daddr = usin->sin_addr.s_addr;
        inet_opt = rcu_dereference_protected(inet->inet_opt,
                                             lockdep_sock_is_held(sk));
        if (inet_opt && inet_opt->opt.srr) {
                if (!daddr)
                        return -EINVAL;
                nexthop = inet_opt->opt.faddr;
        }

        orig_sport = inet->inet_sport;
        orig_dport = usin->sin_port;
        fl4 = &inet->cork.fl.u.ip4;
        rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
                              RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
                              IPPROTO_TCP,
                              orig_sport, orig_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                if (err == -ENETUNREACH)
                        IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
                return err;
        }

        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
                ip_rt_put(rt);
                return -ENETUNREACH;
        }

        if (!inet_opt || !inet_opt->opt.srr)
                daddr = fl4->daddr;

        if (!inet->inet_saddr)
                inet->inet_saddr = fl4->saddr;
        sk_rcv_saddr_set(sk, inet->inet_saddr);

        if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
                /* Reset inherited state */
                tp->rx_opt.ts_recent       = 0;
                tp->rx_opt.ts_recent_stamp = 0;
                if (likely(!tp->repair))
                        WRITE_ONCE(tp->write_seq, 0);
        }

        inet->inet_dport = usin->sin_port;
        sk_daddr_set(sk, daddr);

        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

        tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

        /* Socket identity is still unknown (sport may be zero).
         * However we set state to SYN-SENT and, without releasing the socket
         * lock, select a source port, enter ourselves into the hash tables
         * and complete initialization after this.
         */
        tcp_set_state(sk, TCP_SYN_SENT);
        err = inet_hash_connect(tcp_death_row, sk);
        if (err)
                goto failure;

        sk_set_txhash(sk);

        rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
                               inet->inet_sport, inet->inet_dport, sk);
        if (IS_ERR(rt)) {
                err = PTR_ERR(rt);
                rt = NULL;
                goto failure;
        }
        /* OK, now commit destination to socket. */
        sk->sk_gso_type = SKB_GSO_TCPV4;
        sk_setup_caps(sk, &rt->dst);
        rt = NULL;

        if (likely(!tp->repair)) {
                if (!tp->write_seq)
                        WRITE_ONCE(tp->write_seq,
                                   secure_tcp_seq(inet->inet_saddr,
                                                  inet->inet_daddr,
                                                  inet->inet_sport,
                                                  usin->sin_port));
                tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
                                                 inet->inet_saddr,
                                                 inet->inet_daddr);
        }

        inet->inet_id = prandom_u32();

        if (tcp_fastopen_defer_connect(sk, &err))
                return err;
        if (err)
                goto failure;

        err = tcp_connect(sk);

        if (err)
                goto failure;

        return 0;

failure:
        /*
         * This unhashes the socket and releases the local port,
         * if necessary.
         */
        tcp_set_state(sk, TCP_CLOSE);
        ip_rt_put(rt);
        sk->sk_route_caps = 0;
        inet->inet_dport = 0;
        return err;
}
EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC 1191.
 * It can be called through tcp_release_cb() if the socket was owned by the user
 * at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
        struct inet_sock *inet = inet_sk(sk);
        struct dst_entry *dst;
        u32 mtu;

        if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
                return;
        mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
        dst = inet_csk_update_pmtu(sk, mtu);
        if (!dst)
                return;

        /* Something is about to be wrong... Remember the soft error
         * in case this connection is not able to recover.
         */
        if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
                sk->sk_err_soft = EMSGSIZE;

        mtu = dst_mtu(dst);

        if (inet->pmtudisc != IP_PMTUDISC_DONT &&
            ip_sk_accept_pmtu(sk) &&
            inet_csk(sk)->icsk_pmtu_cookie > mtu) {
                tcp_sync_mss(sk, mtu);

                /* Resend the TCP packet because it's
                 * clear that the old packet has been
                 * dropped. This is the new "fast" path mtu
                 * discovery.
                 */
                tcp_simple_retransmit(sk);
        } /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);

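/* Propagate an ICMP redirect to this socket's cached route, if one is still
 * attached; the dst's redirect handler updates the next hop.
 */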
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);

        if (dst)
                dst->ops->redirect(dst, sk, skb);
}

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
        struct request_sock *req = inet_reqsk(sk);
        struct net *net = sock_net(sk);

        /* ICMPs are not backlogged, hence we cannot get
         * an established socket here.
         */
        if (seq != tcp_rsk(req)->snt_isn) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
        } else if (abort) {
                /*
                 * Still in SYN_RECV, just remove it silently.
                 * There is no good way to pass the error to the newly
                 * created socket, and POSIX does not want network
                 * errors returned from accept().
                 */
                inet_csk_reqsk_queue_drop(req->rsk_listener, req);
                tcp_listendrop(req->rsk_listener);
        }
        reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/* TCP-LD (RFC 6069): when an ICMP unreachable suggests that an earlier
 * retransmission timeout was caused by a transient route failure rather than
 * by loss, undo one step of the exponential backoff and rearm (or
 * immediately fire) the retransmit timer.
 */
void tcp_ld_RTO_revert(struct sock *sk, u32 seq)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        s32 remaining;
        u32 delta_us;

        if (sock_owned_by_user(sk))
                return;

        if (seq != tp->snd_una || !icsk->icsk_retransmits ||
            !icsk->icsk_backoff)
                return;

        skb = tcp_rtx_queue_head(sk);
        if (WARN_ON_ONCE(!skb))
                return;

        icsk->icsk_backoff--;
        icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT;
        icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

        tcp_mstamp_refresh(tp);
        delta_us = (u32)(tp->tcp_mstamp - tcp_skb_timestamp_us(skb));
        remaining = icsk->icsk_rto - usecs_to_jiffies(delta_us);

        if (remaining > 0) {
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          remaining, TCP_RTO_MAX);
        } else {
                /* RTO revert clocked out retransmission.
                 * Will retransmit now.
                 */
                tcp_retransmit_timer(sk);
        }
}
EXPORT_SYMBOL(tcp_ld_RTO_revert);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */

int tcp_v4_err(struct sk_buff *skb, u32 info)
{
        const struct iphdr *iph = (const struct iphdr *)skb->data;
        struct tcphdr *th = (struct tcphdr *)(skb->data + (iph->ihl << 2));
        struct tcp_sock *tp;
        struct inet_sock *inet;
        const int type = icmp_hdr(skb)->type;
        const int code = icmp_hdr(skb)->code;
        struct sock *sk;
        struct request_sock *fastopen;
        u32 seq, snd_una;
        int err;
        struct net *net = dev_net(skb->dev);

        sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
                                       th->dest, iph->saddr, ntohs(th->source),
                                       inet_iif(skb), 0);
        if (!sk) {
                __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
                return -ENOENT;
        }
        if (sk->sk_state == TCP_TIME_WAIT) {
                inet_twsk_put(inet_twsk(sk));
                return 0;
        }
        seq = ntohl(th->seq);
        if (sk->sk_state == TCP_NEW_SYN_RECV) {
                tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB ||
                                     type == ICMP_TIME_EXCEEDED ||
                                     (type == ICMP_DEST_UNREACH &&
                                      (code == ICMP_NET_UNREACH ||
                                       code == ICMP_HOST_UNREACH)));
                return 0;
        }

        bh_lock_sock(sk);
        /* If too many ICMPs get dropped on busy
         * servers this needs to be solved differently.
         * We do take care of the PMTU discovery (RFC 1191) special case:
         * we can receive locally generated ICMP messages while the socket is held.
         */
        if (sock_owned_by_user(sk)) {
                if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
                        __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
        }
        if (sk->sk_state == TCP_CLOSE)
                goto out;

        if (static_branch_unlikely(&ip4_min_ttl)) {
                /* min_ttl can be changed concurrently from do_ip_setsockopt() */
                if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
                        __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
                        goto out;
                }
        }

        tp = tcp_sk(sk);
        /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
        fastopen = rcu_dereference(tp->fastopen_rsk);
        snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
        if (sk->sk_state != TCP_LISTEN &&
            !between(seq, snd_una, tp->snd_nxt)) {
                __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
                goto out;
        }

        switch (type) {
        case ICMP_REDIRECT:
                if (!sock_owned_by_user(sk))
                        do_redirect(skb, sk);
                goto out;
        case ICMP_SOURCE_QUENCH:
                /* Just silently ignore these. */
                goto out;
        case ICMP_PARAMETERPROB:
                err = EPROTO;
                break;
        case ICMP_DEST_UNREACH:
                if (code > NR_ICMP_UNREACH)
                        goto out;

                if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC 1191) */
                        /* We are not interested in TCP_LISTEN and open_requests
                         * (SYN-ACKs sent out by Linux are always <576 bytes, so
                         * they should go through unfragmented).
                         */
                        if (sk->sk_state == TCP_LISTEN)
                                goto out;

                        WRITE_ONCE(tp->mtu_info, info);
                        if (!sock_owned_by_user(sk)) {
                                tcp_v4_mtu_reduced(sk);
                        } else {
                                if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
                                        sock_hold(sk);
                        }
                        goto out;
                }

                err = icmp_err_convert[code].errno;
                /* check if this ICMP message allows revert of backoff.
                 * (see RFC 6069)
                 */
                if (!fastopen &&
                    (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))
                        tcp_ld_RTO_revert(sk, seq);
                break;
        case ICMP_TIME_EXCEEDED:
                err = EHOSTUNREACH;
                break;
        default:
                goto out;
        }

        switch (sk->sk_state) {
        case TCP_SYN_SENT:
        case TCP_SYN_RECV:
                /* Only in fast or simultaneous open. If a fast open socket is
                 * already accepted it is treated as a connected one below.
                 */
                if (fastopen && !fastopen->sk)
                        break;

                ip_icmp_error(sk, skb, err, th->dest, info, (u8 *)th);

                if (!sock_owned_by_user(sk)) {
                        sk->sk_err = err;

                        sk_error_report(sk);

                        tcp_done(sk);
                } else {
                        sk->sk_err_soft = err;
                }
                goto out;
        }

        /* If we've already connected we will keep trying
         * until we time out, or the user gives up.
         *
         * RFC 1122 4.2.3.9 allows us to consider as hard errors
         * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
         * but it is obsoleted by pmtu discovery).
         *
         * Note that in the modern internet, where routing is unreliable
         * and broken firewalls sit in each dark corner sending random
         * errors ordered by their masters, even these two messages finally lose
         * their original sense (even Linux sends invalid PORT_UNREACHs).
         *
         * Now we are in compliance with RFCs.
         * --ANK (980905)
         */

        inet = inet_sk(sk);
        if (!sock_owned_by_user(sk) && inet->recverr) {
                sk->sk_err = err;
                sk_error_report(sk);
        } else { /* Only an error on timeout */
                sk->sk_err_soft = err;
        }

out:
        bh_unlock_sock(sk);
        sock_put(sk);
        return 0;
}

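/* Set up a CHECKSUM_PARTIAL TCP checksum: seed th->check with only the
 * pseudo-header sum and point csum_start/csum_offset at the checksum field,
 * leaving the payload sum to hardware offload (or the software fallback).
 */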
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
        struct tcphdr *th = tcp_hdr(skb);

        th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
        skb->csum_start = skb_transport_header(skb) - skb->head;
        skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
        const struct inet_sock *inet = inet_sk(sk);

        __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);

/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 *               for reset.
 * Answer: if a packet caused an RST, it is not for a socket
 *         existing in our system; if it is matched to a socket,
 *         it is just a duplicate segment or a bug in the other side's TCP.
 *         So we build the reply based only on the parameters
 *         that arrived with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */

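/* Option space reserved in the RST reply below: the MD5 signature option
 * when CONFIG_TCP_MD5SIG is enabled, otherwise a single 32-bit word for the
 * MPTCP reset option (the two cannot coexist; see the rep.opt[0] check).
 */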
#ifdef CONFIG_TCP_MD5SIG
#define OPTION_BYTES TCPOLEN_MD5SIG_ALIGNED
#else
#define OPTION_BYTES sizeof(__be32)
#endif

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[OPTION_BYTES / sizeof(__be32)];
        } rep;
        struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key *key = NULL;
        const __u8 *hash_location = NULL;
        unsigned char newhash[16];
        int genhash;
        struct sock *sk1 = NULL;
#endif
        u64 transmit_time = 0;
        struct sock *ctl_sk;
        struct net *net;

        /* Never send a reset in response to a reset. */
        if (th->rst)
                return;

        /* If sk not NULL, it means we did a successful lookup and the incoming
         * route had to be correct. prequeue might have dropped our dst.
         */
        if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
                return;

        /* Swap the send and the receive. */
        memset(&rep, 0, sizeof(rep));
        rep.th.dest   = th->source;
        rep.th.source = th->dest;
        rep.th.doff   = sizeof(struct tcphdr) / 4;
        rep.th.rst    = 1;

        if (th->ack) {
                rep.th.seq = th->ack_seq;
        } else {
                rep.th.ack = 1;
                rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
                                       skb->len - (th->doff << 2));
        }

        memset(&arg, 0, sizeof(arg));
        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);

        net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
        rcu_read_lock();
        hash_location = tcp_parse_md5sig_option(th);
        if (sk && sk_fullsock(sk)) {
                const union tcp_md5_addr *addr;
                int l3index;

                /* sdif set, means packet ingressed via a device
                 * in an L3 domain and inet_iif is set to it.
                 */
                l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
                addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
                key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
        } else if (hash_location) {
                const union tcp_md5_addr *addr;
                int sdif = tcp_v4_sdif(skb);
                int dif = inet_iif(skb);
                int l3index;

                /*
                 * The active side is lost. Try to find the listening socket
                 * through the source port, and then find the MD5 key through
                 * the listening socket. We are not losing security here:
                 * the incoming packet is checked against the MD5 hash of the
                 * key we find, and no RST is generated if the hash doesn't match.
                 */
                sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
                                             ip_hdr(skb)->saddr,
                                             th->source, ip_hdr(skb)->daddr,
                                             ntohs(th->source), dif, sdif);
                /* don't send rst if it can't find key */
                if (!sk1)
                        goto out;

                /* sdif set, means packet ingressed via a device
                 * in an L3 domain and dif is set to it.
                 */
                l3index = sdif ? dif : 0;
                addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
                key = tcp_md5_do_lookup(sk1, l3index, addr, AF_INET);
                if (!key)
                        goto out;

                genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
                if (genhash || memcmp(hash_location, newhash, 16) != 0)
                        goto out;
        }

        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
                                   (TCPOPT_MD5SIG << 8) |
                                   TCPOLEN_MD5SIG);
                /* Update length and the length the header thinks exists */
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *)&rep.opt[1],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        /* Can't co-exist with TCPMD5, hence check rep.opt[0] */
        if (rep.opt[0] == 0) {
                __be32 mrst = mptcp_reset_option(skb);

                if (mrst) {
                        rep.opt[0] = mrst;
                        arg.iov[0].iov_len += sizeof(mrst);
                        rep.th.doff = arg.iov[0].iov_len / 4;
                }
        }

        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

        /* When the socket is gone, all binding information is lost.
         * Routing might fail in this case. No choice here: if we choose to
         * force the input interface, we will misroute in case of an
         * asymmetric route.
         */
        if (sk) {
                arg.bound_dev_if = sk->sk_bound_dev_if;
                if (sk_fullsock(sk))
                        trace_tcp_send_reset(sk, skb);
        }

        BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
                     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

        arg.tos = ip_hdr(skb)->tos;
        arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
        local_bh_disable();
        ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
        if (sk) {
                ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
                                  inet_twsk(sk)->tw_mark : sk->sk_mark;
                ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
                                      inet_twsk(sk)->tw_priority : sk->sk_priority;
                transmit_time = tcp_transmit_time(sk);
        }
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len,
                              transmit_time);

        ctl_sk->sk_mark = 0;
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
        __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
        local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
        rcu_read_unlock();
#endif
}

/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is certainly ugly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
                            struct sk_buff *skb, u32 seq, u32 ack,
                            u32 win, u32 tsval, u32 tsecr, int oif,
                            struct tcp_md5sig_key *key,
                            int reply_flags, u8 tos)
{
        const struct tcphdr *th = tcp_hdr(skb);
        struct {
                struct tcphdr th;
                __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
                           + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
                          ];
        } rep;
        struct net *net = sock_net(sk);
        struct ip_reply_arg arg;
        struct sock *ctl_sk;
        u64 transmit_time;

        memset(&rep.th, 0, sizeof(struct tcphdr));
        memset(&arg, 0, sizeof(arg));

        arg.iov[0].iov_base = (unsigned char *)&rep;
        arg.iov[0].iov_len  = sizeof(rep.th);
        if (tsecr) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
                                   (TCPOPT_TIMESTAMP << 8) |
                                   TCPOLEN_TIMESTAMP);
                rep.opt[1] = htonl(tsval);
                rep.opt[2] = htonl(tsecr);
                arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
        }

        /* Swap the send and the receive. */
        rep.th.dest    = th->source;
        rep.th.source  = th->dest;
        rep.th.doff    = arg.iov[0].iov_len / 4;
        rep.th.seq     = htonl(seq);
        rep.th.ack_seq = htonl(ack);
        rep.th.ack     = 1;
        rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
        if (key) {
                int offset = (tsecr) ? 3 : 0;

                rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_MD5SIG << 8) |
                                          TCPOLEN_MD5SIG);
                arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
                rep.th.doff = arg.iov[0].iov_len / 4;

                tcp_v4_md5_hash_hdr((__u8 *)&rep.opt[offset],
                                    key, ip_hdr(skb)->saddr,
                                    ip_hdr(skb)->daddr, &rep.th);
        }
#endif
        arg.flags = reply_flags;
        arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
                                      ip_hdr(skb)->saddr, /* XXX */
                                      arg.iov[0].iov_len, IPPROTO_TCP, 0);
        arg.csumoffset = offsetof(struct tcphdr, check) / 2;
        if (oif)
                arg.bound_dev_if = oif;
        arg.tos = tos;
        arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
        local_bh_disable();
        ctl_sk = this_cpu_read(*net->ipv4.tcp_sk);
        ctl_sk->sk_mark = (sk->sk_state == TCP_TIME_WAIT) ?
                          inet_twsk(sk)->tw_mark : sk->sk_mark;
        ctl_sk->sk_priority = (sk->sk_state == TCP_TIME_WAIT) ?
                              inet_twsk(sk)->tw_priority : sk->sk_priority;
        transmit_time = tcp_transmit_time(sk);
        ip_send_unicast_reply(ctl_sk,
                              skb, &TCP_SKB_CB(skb)->header.h4.opt,
                              ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                              &arg, arg.iov[0].iov_len,
                              transmit_time);

        ctl_sk->sk_mark = 0;
        __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
        local_bh_enable();
}

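/* ACK a segment arriving for a TIME-WAIT socket, echoing the window,
 * timestamp and sequence state preserved in the timewait bucket.
 */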
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
        struct inet_timewait_sock *tw = inet_twsk(sk);
        struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

        tcp_v4_send_ack(sk, skb,
                        tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp_raw() + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent,
                        tw->tw_bound_dev_if,
                        tcp_twsk_md5_key(tcptw),
                        tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        tw->tw_tos
                        );

        inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req)
{
        const union tcp_md5_addr *addr;
        int l3index;

        /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
         * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
         */
        u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
                                                 tcp_sk(sk)->snd_nxt;

        /* RFC 7323 2.3
         * The window field (SEG.WND) of every outgoing segment, with the
         * exception of <SYN> segments, MUST be right-shifted by
         * Rcv.Wind.Shift bits:
         */
        addr = (union tcp_md5_addr *)&ip_hdr(skb)->saddr;
        l3index = tcp_v4_sdif(skb) ? inet_iif(skb) : 0;
        tcp_v4_send_ack(sk, skb, seq,
                        tcp_rsk(req)->rcv_nxt,
                        req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
                        tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
                        req->ts_recent,
                        0,
                        tcp_md5_do_lookup(sk, l3index, addr, AF_INET),
                        inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
                        ip_hdr(skb)->tos);
}

/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
                              struct flowi *fl,
                              struct request_sock *req,
                              struct tcp_fastopen_cookie *foc,
                              enum tcp_synack_type synack_type,
                              struct sk_buff *syn_skb)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct flowi4 fl4;
        int err = -1;
        struct sk_buff *skb;
        u8 tos;

        /* First, grab a route. */
        if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
                return -1;

        skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

        if (skb) {
                __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

                tos = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
                                (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
                                (inet_sk(sk)->tos & INET_ECN_MASK) :
                                inet_sk(sk)->tos;

                if (!INET_ECN_is_capable(tos) &&
                    tcp_bpf_ca_needs_ecn((struct sock *)req))
                        tos |= INET_ECN_ECT_0;

                rcu_read_lock();
                err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
                                            ireq->ir_rmt_addr,
                                            rcu_dereference(ireq->ireq_opt),
                                            tos);
                rcu_read_unlock();
                err = net_xmit_eval(err);
        }

        return err;
}

/*
 * IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
        kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC 2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

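/* For orientation, a sketch of how keys arrive here from userspace (not code
 * from this file): an application installs a per-peer key with the
 * TCP_MD5SIG socket option before connect()/listen(), e.g.
 *
 *      struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *      struct sockaddr_in *a = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *      a->sin_family = AF_INET;
 *      a->sin_addr.s_addr = peer_ip;   // 'peer_ip' is a placeholder
 *      memcpy(md5.tcpm_key, "secret", 6);
 *      setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * which reaches tcp_md5_do_add() below via tcp_v4_parse_md5_keys().
 */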
DEFINE_STATIC_KEY_FALSE(tcp_md5_needed);
EXPORT_SYMBOL(tcp_md5_needed);

static bool better_md5_match(struct tcp_md5sig_key *old, struct tcp_md5sig_key *new)
{
        if (!old)
                return true;

        /* l3index always overrides non-l3index */
        if (old->l3index && new->l3index == 0)
                return false;
        if (old->l3index == 0 && new->l3index)
                return true;

        return old->prefixlen < new->prefixlen;
}

/* Find the Key structure for an address. */
struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
                                           const union tcp_md5_addr *addr,
                                           int family)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        const struct tcp_md5sig_info *md5sig;
        __be32 mask;
        struct tcp_md5sig_key *best_match = NULL;
        bool match;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       lockdep_sock_is_held(sk));
        if (!md5sig)
                return NULL;

        hlist_for_each_entry_rcu(key, &md5sig->head, node,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
                if (key->flags & TCP_MD5SIG_FLAG_IFINDEX && key->l3index != l3index)
                        continue;
                if (family == AF_INET) {
                        mask = inet_make_mask(key->prefixlen);
                        match = (key->addr.a4.s_addr & mask) ==
                                (addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
                } else if (family == AF_INET6) {
                        match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
                                                  key->prefixlen);
#endif
                } else {
                        match = false;
                }

                if (match && better_md5_match(best_match, key))
                        best_match = key;
        }
        return best_match;
}
EXPORT_SYMBOL(__tcp_md5_do_lookup);

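/* Unlike the best-match lookup above, which serves segment processing and
 * honors prefix matching, the exact lookup below is used by key management:
 * add, update and delete must target one specific (address, prefixlen,
 * l3index, flags) entry rather than whichever key matches best.
 */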
static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
                                                      const union tcp_md5_addr *addr,
                                                      int family, u8 prefixlen,
                                                      int l3index, u8 flags)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_md5sig_key *key;
        unsigned int size = sizeof(struct in_addr);
        const struct tcp_md5sig_info *md5sig;

        /* caller either holds rcu_read_lock() or socket lock */
        md5sig = rcu_dereference_check(tp->md5sig_info,
                                       lockdep_sock_is_held(sk));
        if (!md5sig)
                return NULL;
#if IS_ENABLED(CONFIG_IPV6)
        if (family == AF_INET6)
                size = sizeof(struct in6_addr);
#endif
        hlist_for_each_entry_rcu(key, &md5sig->head, node,
                                 lockdep_sock_is_held(sk)) {
                if (key->family != family)
                        continue;
                if ((key->flags & TCP_MD5SIG_FLAG_IFINDEX) != (flags & TCP_MD5SIG_FLAG_IFINDEX))
                        continue;
                if (key->l3index != l3index)
                        continue;
                if (!memcmp(&key->addr, addr, size) &&
                    key->prefixlen == prefixlen)
                        return key;
        }
        return NULL;
}

Eric Dumazetb83e3de2015-09-25 07:39:15 -07001135struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001136 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001137{
Eric Dumazetb52e6922015-04-09 14:36:42 -07001138 const union tcp_md5_addr *addr;
David Aherndea53bb2019-12-30 14:14:28 -08001139 int l3index;
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001140
David Aherndea53bb2019-12-30 14:14:28 -08001141 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
1142 addr_sk->sk_bound_dev_if);
Eric Dumazetb52e6922015-04-09 14:36:42 -07001143 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
David Aherndea53bb2019-12-30 14:14:28 -08001144 return tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001145}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001146EXPORT_SYMBOL(tcp_v4_md5_lookup);
1147
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001148/* This can be called on a newly created socket, from other files */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001149int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
Leonard Cresteza76c2312021-10-15 10:26:05 +03001150 int family, u8 prefixlen, int l3index, u8 flags,
David Aherndea53bb2019-12-30 14:14:28 -08001151 const u8 *newkey, u8 newkeylen, gfp_t gfp)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001152{
1153 /* Add Key to the list */
Matthias M. Dellwegb0a713e2007-10-29 20:55:27 -07001154 struct tcp_md5sig_key *key;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001155 struct tcp_sock *tp = tcp_sk(sk);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001156 struct tcp_md5sig_info *md5sig;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001157
Leonard Cresteza76c2312021-10-15 10:26:05 +03001158 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001159 if (key) {
Eric Dumazete6ced832020-07-01 11:43:04 -07001160 /* Pre-existing entry - just update that one.
1161 * Note that the key might be used concurrently.
1162 * data_race() is telling kcsan that we do not care of
1163 * key mismatches, since changing MD5 key on live flows
1164 * can lead to packet drops.
1165 */
1166 data_race(memcpy(key->key, newkey, newkeylen));
Eric Dumazet6a2febe2020-06-30 16:41:01 -07001167
Eric Dumazete6ced832020-07-01 11:43:04 -07001168 /* Pairs with READ_ONCE() in tcp_md5_hash_key().
1169 * Also note that a reader could catch new key->keylen value
1170 * but old key->key[], this is the reason we use __GFP_ZERO
1171 * at sock_kmalloc() time below these lines.
1172 */
1173 WRITE_ONCE(key->keylen, newkeylen);
Eric Dumazet6a2febe2020-06-30 16:41:01 -07001174
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001175 return 0;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001176 }
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001177
Eric Dumazeta8afca02012-01-31 18:45:40 +00001178 md5sig = rcu_dereference_protected(tp->md5sig_info,
Hannes Frederic Sowa1e1d04e2016-04-05 17:10:15 +02001179 lockdep_sock_is_held(sk));
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001180 if (!md5sig) {
1181 md5sig = kmalloc(sizeof(*md5sig), gfp);
1182 if (!md5sig)
1183 return -ENOMEM;
1184
1185 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1186 INIT_HLIST_HEAD(&md5sig->head);
Eric Dumazeta8afca02012-01-31 18:45:40 +00001187 rcu_assign_pointer(tp->md5sig_info, md5sig);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001188 }
1189
Eric Dumazete6ced832020-07-01 11:43:04 -07001190 key = sock_kmalloc(sk, sizeof(*key), gfp | __GFP_ZERO);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001191 if (!key)
1192 return -ENOMEM;
Eric Dumazet71cea172013-05-20 06:52:26 +00001193 if (!tcp_alloc_md5sig_pool()) {
Eric Dumazet5f3d9cb2012-01-31 10:56:48 +00001194 sock_kfree_s(sk, key, sizeof(*key));
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001195 return -ENOMEM;
1196 }
1197
1198 memcpy(key->key, newkey, newkeylen);
1199 key->keylen = newkeylen;
1200 key->family = family;
Ivan Delalande67973182017-06-15 18:07:06 -07001201 key->prefixlen = prefixlen;
David Aherndea53bb2019-12-30 14:14:28 -08001202 key->l3index = l3index;
Leonard Cresteza76c2312021-10-15 10:26:05 +03001203 key->flags = flags;
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001204 memcpy(&key->addr, addr,
1205 (family == AF_INET6) ? sizeof(struct in6_addr) :
1206 sizeof(struct in_addr));
1207 hlist_add_head_rcu(&key->node, &md5sig->head);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001208 return 0;
1209}
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001210EXPORT_SYMBOL(tcp_md5_do_add);
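/* Note that tcp_md5_do_add() and tcp_md5_do_del() expect the socket lock
 * to be held (or an equivalent guarantee, see lockdep_sock_is_held()
 * above): the md5sig list is only ever modified under it, which is what
 * lets readers traverse it with nothing more than rcu_read_lock().
 */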
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001211
Ivan Delalande67973182017-06-15 18:07:06 -07001212int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
Leonard Cresteza76c2312021-10-15 10:26:05 +03001213 u8 prefixlen, int l3index, u8 flags)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001214{
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001215 struct tcp_md5sig_key *key;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001216
Leonard Cresteza76c2312021-10-15 10:26:05 +03001217 key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen, l3index, flags);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001218 if (!key)
1219 return -ENOENT;
1220 hlist_del_rcu(&key->node);
Eric Dumazet5f3d9cb2012-01-31 10:56:48 +00001221 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001222 kfree_rcu(key, rcu);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001223 return 0;
1224}
1225EXPORT_SYMBOL(tcp_md5_do_del);
1226
stephen hemmingere0683e702012-10-26 14:31:40 +00001227static void tcp_clear_md5_list(struct sock *sk)
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001228{
1229 struct tcp_sock *tp = tcp_sk(sk);
1230 struct tcp_md5sig_key *key;
Sasha Levinb67bfe02013-02-27 17:06:00 -08001231 struct hlist_node *n;
Eric Dumazeta8afca02012-01-31 18:45:40 +00001232 struct tcp_md5sig_info *md5sig;
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001233
Eric Dumazeta8afca02012-01-31 18:45:40 +00001234 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1235
Sasha Levinb67bfe02013-02-27 17:06:00 -08001236 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001237 hlist_del_rcu(&key->node);
Eric Dumazet5f3d9cb2012-01-31 10:56:48 +00001238 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001239 kfree_rcu(key, rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001240 }
1241}
1242
Ivan Delalande8917a772017-06-15 18:07:07 -07001243static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
Christoph Hellwigd4c19c42020-07-23 08:09:05 +02001244 sockptr_t optval, int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001245{
1246 struct tcp_md5sig cmd;
1247 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
David Aherncea97602019-12-30 14:14:25 -08001248 const union tcp_md5_addr *addr;
Ivan Delalande8917a772017-06-15 18:07:07 -07001249 u8 prefixlen = 32;
David Aherndea53bb2019-12-30 14:14:28 -08001250 int l3index = 0;
Leonard Cresteza76c2312021-10-15 10:26:05 +03001251 u8 flags;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001252
1253 if (optlen < sizeof(cmd))
1254 return -EINVAL;
1255
Christoph Hellwigd4c19c42020-07-23 08:09:05 +02001256 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001257 return -EFAULT;
1258
1259 if (sin->sin_family != AF_INET)
1260 return -EINVAL;
1261
Leonard Cresteza76c2312021-10-15 10:26:05 +03001262 flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;
1263
Ivan Delalande8917a772017-06-15 18:07:07 -07001264 if (optname == TCP_MD5SIG_EXT &&
1265 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
1266 prefixlen = cmd.tcpm_prefixlen;
1267 if (prefixlen > 32)
1268 return -EINVAL;
1269 }
1270
Leonard Cresteza76c2312021-10-15 10:26:05 +03001271 if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
David Ahern6b102db2019-12-30 14:14:29 -08001272 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
1273 struct net_device *dev;
1274
1275 rcu_read_lock();
1276 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
1277 if (dev && netif_is_l3_master(dev))
1278 l3index = dev->ifindex;
1279
1280 rcu_read_unlock();
1281
 1282		/* ok to test whether dev was set outside of rcu;
 1283		 * right now the device MUST be an L3 master
1284 */
1285 if (!dev || !l3index)
1286 return -EINVAL;
1287 }
1288
David Aherncea97602019-12-30 14:14:25 -08001289 addr = (union tcp_md5_addr *)&sin->sin_addr.s_addr;
1290
Dmitry Popov64a124e2014-08-03 22:45:19 +04001291 if (!cmd.tcpm_keylen)
Leonard Cresteza76c2312021-10-15 10:26:05 +03001292 return tcp_md5_do_del(sk, addr, AF_INET, prefixlen, l3index, flags);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001293
1294 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1295 return -EINVAL;
1296
Leonard Cresteza76c2312021-10-15 10:26:05 +03001297 return tcp_md5_do_add(sk, addr, AF_INET, prefixlen, l3index, flags,
David Aherncea97602019-12-30 14:14:25 -08001298 cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001299}
1300
Eric Dumazet19689e32016-06-27 18:51:53 +02001301static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1302 __be32 daddr, __be32 saddr,
1303 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001304{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001305 struct tcp4_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -07001306 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +02001307 struct tcphdr *_th;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001308
Eric Dumazet19689e32016-06-27 18:51:53 +02001309 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001310 bp->saddr = saddr;
1311 bp->daddr = daddr;
1312 bp->pad = 0;
YOSHIFUJI Hideaki076fb722008-04-17 12:48:12 +09001313 bp->protocol = IPPROTO_TCP;
Adam Langley49a72df2008-07-19 00:01:42 -07001314 bp->len = cpu_to_be16(nbytes);
David S. Millerc7da57a2007-10-26 00:41:21 -07001315
Eric Dumazet19689e32016-06-27 18:51:53 +02001316 _th = (struct tcphdr *)(bp + 1);
1317 memcpy(_th, th, sizeof(*th));
1318 _th->check = 0;
1319
1320 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1321 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1322 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +08001323 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -07001324}
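/* Per RFC 2385 the digest covers, in order: the IPv4 pseudo-header and
 * the TCP header with its checksum zeroed (both built above), then any
 * segment payload, and finally the key itself; callers feed the last two
 * pieces via tcp_md5_hash_skb_data() and tcp_md5_hash_key().
 */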
1325
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001326static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001327 __be32 daddr, __be32 saddr, const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -07001328{
1329 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001330 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -07001331
1332 hp = tcp_get_md5sig_pool();
1333 if (!hp)
1334 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001335 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001336
Herbert Xucf80e0e2016-01-24 21:20:23 +08001337 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001338 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +02001339 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -07001340 goto clear_hash;
1341 if (tcp_md5_hash_key(hp, key))
1342 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001343 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1344 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001345 goto clear_hash;
1346
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001347 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001348 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -07001349
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001350clear_hash:
1351 tcp_put_md5sig_pool();
1352clear_hash_noput:
1353 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -07001354 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001355}
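/* tcp_v4_md5_hash_hdr() above signs a header-only segment, which suits
 * the RST/ACK replies we generate ourselves; tcp_v4_md5_hash_skb() below
 * additionally hashes the skb payload for real data segments.
 */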
1356
Eric Dumazet39f8e582015-03-24 15:58:55 -07001357int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1358 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001359 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001360{
Adam Langley49a72df2008-07-19 00:01:42 -07001361 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001362 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001363 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001364 __be32 saddr, daddr;
1365
Eric Dumazet39f8e582015-03-24 15:58:55 -07001366 if (sk) { /* valid for establish/request sockets */
1367 saddr = sk->sk_rcv_saddr;
1368 daddr = sk->sk_daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001369 } else {
Adam Langley49a72df2008-07-19 00:01:42 -07001370 const struct iphdr *iph = ip_hdr(skb);
1371 saddr = iph->saddr;
1372 daddr = iph->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001373 }
Adam Langley49a72df2008-07-19 00:01:42 -07001374
1375 hp = tcp_get_md5sig_pool();
1376 if (!hp)
1377 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001378 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001379
Herbert Xucf80e0e2016-01-24 21:20:23 +08001380 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001381 goto clear_hash;
1382
Eric Dumazet19689e32016-06-27 18:51:53 +02001383 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -07001384 goto clear_hash;
1385 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1386 goto clear_hash;
1387 if (tcp_md5_hash_key(hp, key))
1388 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001389 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1390 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001391 goto clear_hash;
1392
1393 tcp_put_md5sig_pool();
1394 return 0;
1395
1396clear_hash:
1397 tcp_put_md5sig_pool();
1398clear_hash_noput:
1399 memset(md5_hash, 0, 16);
1400 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001401}
Adam Langley49a72df2008-07-19 00:01:42 -07001402EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001403
Eric Dumazetba8e2752015-10-02 11:43:28 -07001404#endif
1405
Eric Dumazetff74e232015-03-24 15:58:54 -07001406/* Called with rcu_read_lock() */
Eric Dumazetba8e2752015-10-02 11:43:28 -07001407static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
David Ahern534322c2019-12-30 14:14:27 -08001408 const struct sk_buff *skb,
1409 int dif, int sdif)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001410{
Eric Dumazetba8e2752015-10-02 11:43:28 -07001411#ifdef CONFIG_TCP_MD5SIG
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001412 /*
1413 * This gets called for each TCP segment that arrives
1414 * so we want to be efficient.
1415 * We have 3 drop cases:
1416 * o No MD5 hash and one expected.
1417 * o MD5 hash and we're not expecting one.
 1418	 * o MD5 hash and it's wrong.
1419 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001420 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001421 struct tcp_md5sig_key *hash_expected;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001422 const struct iphdr *iph = ip_hdr(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001423 const struct tcphdr *th = tcp_hdr(skb);
David Aherncea97602019-12-30 14:14:25 -08001424 const union tcp_md5_addr *addr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001425 unsigned char newhash[16];
David Aherndea53bb2019-12-30 14:14:28 -08001426 int genhash, l3index;
1427
 1428	/* sdif set means the packet ingressed via a device
1429 * in an L3 domain and dif is set to the l3mdev
1430 */
1431 l3index = sdif ? dif : 0;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001432
David Aherncea97602019-12-30 14:14:25 -08001433 addr = (union tcp_md5_addr *)&iph->saddr;
David Aherndea53bb2019-12-30 14:14:28 -08001434 hash_expected = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +09001435 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001436
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001437 /* We've parsed the options - do we have a hash? */
1438 if (!hash_expected && !hash_location)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001439 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001440
1441 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001442 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001443 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001444 }
1445
1446 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001447 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001448 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001449 }
1450
 1451	/* Okay, so we have both hash_expected and hash_location -
 1452	 * so we need to calculate the MD5 hash.
1453 */
Adam Langley49a72df2008-07-19 00:01:42 -07001454 genhash = tcp_v4_md5_hash_skb(newhash,
1455 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -07001456 NULL, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001457
1458 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -07001459 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
David Aherndea53bb2019-12-30 14:14:28 -08001460 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s L3 index %d\n",
Joe Perchese87cc472012-05-13 21:56:26 +00001461 &iph->saddr, ntohs(th->source),
1462 &iph->daddr, ntohs(th->dest),
1463 genhash ? " tcp_v4_calc_md5_hash failed"
David Aherndea53bb2019-12-30 14:14:28 -08001464 : "", l3index);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001465 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001466 }
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001467 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001468#endif
Eric Dumazetba8e2752015-10-02 11:43:28 -07001469 return false;
1470}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001471
Eric Dumazetb40cf182015-09-25 07:39:08 -07001472static void tcp_v4_init_req(struct request_sock *req,
1473 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001474 struct sk_buff *skb)
1475{
1476 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001477 struct net *net = sock_net(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +03001478
Eric Dumazet08d2cc3b2015-03-18 14:05:38 -07001479 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1480 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001481 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
Octavian Purdila16bea702014-06-25 17:09:53 +03001482}
1483
Eric Dumazetf9646292015-09-29 07:42:50 -07001484static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
Florian Westphal7ea851d2020-11-30 16:36:30 +01001485 struct sk_buff *skb,
Eric Dumazetf9646292015-09-29 07:42:50 -07001486 struct flowi *fl,
Florian Westphal7ea851d2020-11-30 16:36:30 +01001487 struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +03001488{
Florian Westphal7ea851d2020-11-30 16:36:30 +01001489 tcp_v4_init_req(req, sk, skb);
1490
1491 if (security_inet_conn_request(sk, skb, req))
1492 return NULL;
1493
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001494 return inet_csk_route_req(sk, &fl->u.ip4, req);
Octavian Purdilad94e0412014-06-25 17:09:55 +03001495}
1496
Eric Dumazet72a3eff2006-11-16 02:30:37 -08001497struct request_sock_ops tcp_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 .family = PF_INET,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001499 .obj_size = sizeof(struct tcp_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +03001500 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001501 .send_ack = tcp_v4_reqsk_send_ack,
1502 .destructor = tcp_v4_reqsk_destructor,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 .send_reset = tcp_v4_send_reset,
stephen hemminger688d1942014-08-29 23:32:05 -07001504 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505};
1506
Mat Martineau35b2c322020-01-09 07:59:21 -08001507const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001508 .mss_clamp = TCP_MSS_DEFAULT,
Octavian Purdila16bea702014-06-25 17:09:53 +03001509#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001510 .req_md5_lookup = tcp_v4_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +00001511 .calc_md5_hash = tcp_v4_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -08001512#endif
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001513#ifdef CONFIG_SYN_COOKIES
1514 .cookie_init_seq = cookie_v4_init_sequence,
1515#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +03001516 .route_req = tcp_v4_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -07001517 .init_seq = tcp_v4_init_seq,
1518 .init_ts_off = tcp_v4_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001519 .send_synack = tcp_v4_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +03001520};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001521
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1523{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524	/* Never answer SYNs sent to broadcast or multicast */
Eric Dumazet511c3f92009-06-02 05:14:27 +00001525 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 goto drop;
1527
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001528 return tcp_conn_request(&tcp_request_sock_ops,
1529 &tcp_request_sock_ipv4_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530
Linus Torvalds1da177e2005-04-16 15:20:36 -07001531drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001532 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 return 0;
1534}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001535EXPORT_SYMBOL(tcp_v4_conn_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
1537
1538/*
1539 * The three way handshake has completed - we got a valid synack -
1540 * now create the new socket.
1541 */
Eric Dumazet0c271712015-09-29 07:42:48 -07001542struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001543 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001544 struct dst_entry *dst,
1545 struct request_sock *req_unhash,
1546 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001548 struct inet_request_sock *ireq;
Ricardo Dias01770a12020-11-20 11:11:33 +00001549 bool found_dup_sk = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001550 struct inet_sock *newinet;
1551 struct tcp_sock *newtp;
1552 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001553#ifdef CONFIG_TCP_MD5SIG
David Aherncea97602019-12-30 14:14:25 -08001554 const union tcp_md5_addr *addr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001555 struct tcp_md5sig_key *key;
David Aherndea53bb2019-12-30 14:14:28 -08001556 int l3index;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001557#endif
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001558 struct ip_options_rcu *inet_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
1560 if (sk_acceptq_is_full(sk))
1561 goto exit_overflow;
1562
Linus Torvalds1da177e2005-04-16 15:20:36 -07001563 newsk = tcp_create_openreq_child(sk, req, skb);
1564 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001565 goto exit_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566
Herbert Xubcd76112006-06-30 13:36:35 -07001567 newsk->sk_gso_type = SKB_GSO_TCPV4;
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001568 inet_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569
1570 newtp = tcp_sk(newsk);
1571 newinet = inet_sk(newsk);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001572 ireq = inet_rsk(req);
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001573 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1574 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
David Ahern6dd9a142015-12-16 13:20:44 -08001575 newsk->sk_bound_dev_if = ireq->ir_iif;
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001576 newinet->inet_saddr = ireq->ir_loc_addr;
1577 inet_opt = rcu_dereference(ireq->ireq_opt);
1578 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001579 newinet->mc_index = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001580 newinet->mc_ttl = ip_hdr(skb)->ttl;
Jiri Benc4c507d22012-02-09 09:35:49 +00001581 newinet->rcv_tos = ip_hdr(skb)->tos;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001582 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001583 if (inet_opt)
1584 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Eric Dumazeta904a062019-11-01 10:32:19 -07001585 newinet->inet_id = prandom_u32();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586
Wei Wang8ef44b62020-12-08 09:55:08 -08001587 /* Set ToS of the new socket based upon the value of incoming SYN.
1588 * ECT bits are set later in tcp_init_transfer().
1589 */
Wei Wangac8f1712020-09-09 17:50:48 -07001590 if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
1591 newinet->tos = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1592
Eric Dumazetdfd25ff2012-03-10 09:20:21 +00001593 if (!dst) {
1594 dst = inet_csk_route_child_sock(sk, newsk, req);
1595 if (!dst)
1596 goto put_and_exit;
1597 } else {
 1598		/* syncookie case: see end of cookie_v4_check() */
1599 }
David S. Miller0e734412011-05-08 15:28:03 -07001600 sk_setup_caps(newsk, dst);
1601
Daniel Borkmann81164412015-01-05 23:57:48 +01001602 tcp_ca_openreq_child(newsk, dst);
1603
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001605 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07001606
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 tcp_initialize_rcv_mss(newsk);
1608
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001609#ifdef CONFIG_TCP_MD5SIG
David Aherndea53bb2019-12-30 14:14:28 -08001610 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001611 /* Copy over the MD5 key from the original socket */
David Aherncea97602019-12-30 14:14:25 -08001612 addr = (union tcp_md5_addr *)&newinet->inet_daddr;
David Aherndea53bb2019-12-30 14:14:28 -08001613 key = tcp_md5_do_lookup(sk, l3index, addr, AF_INET);
Ian Morris00db4122015-04-03 09:17:27 +01001614 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001615 /*
1616 * We're using one, so create a matching key
1617 * on the newsk structure. If we fail to get
1618 * memory, then we end up not copying the key
1619 * across. Shucks.
1620 */
Leonard Cresteza76c2312021-10-15 10:26:05 +03001621 tcp_md5_do_add(newsk, addr, AF_INET, 32, l3index, key->flags,
David Aherncea97602019-12-30 14:14:25 -08001622 key->key, key->keylen, GFP_ATOMIC);
Eric Dumazeta4654192010-05-16 00:36:33 -07001623 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001624 }
1625#endif
1626
David S. Miller0e734412011-05-08 15:28:03 -07001627 if (__inet_inherit_port(sk, newsk) < 0)
1628 goto put_and_exit;
Ricardo Dias01770a12020-11-20 11:11:33 +00001629 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1630 &found_dup_sk);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001631 if (likely(*own_req)) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001632 tcp_move_syn(newtp, req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001633 ireq->ireq_opt = NULL;
1634 } else {
Kuniyuki Iwashimac89dffc2021-01-18 14:59:20 +09001635 newinet->inet_opt = NULL;
1636
Ricardo Dias01770a12020-11-20 11:11:33 +00001637 if (!req_unhash && found_dup_sk) {
1638 /* This code path should only be executed in the
1639 * syncookie case only
1640 */
1641 bh_unlock_sock(newsk);
1642 sock_put(newsk);
1643 newsk = NULL;
Ricardo Dias01770a12020-11-20 11:11:33 +00001644 }
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001645 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646 return newsk;
1647
1648exit_overflow:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001649 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001650exit_nonewsk:
1651 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652exit:
Eric Dumazet9caad862016-04-01 08:52:20 -07001653 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 return NULL;
David S. Miller0e734412011-05-08 15:28:03 -07001655put_and_exit:
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001656 newinet->inet_opt = NULL;
Christoph Paasche337e242012-12-14 04:07:58 +00001657 inet_csk_prepare_forced_close(newsk);
1658 tcp_done(newsk);
David S. Miller0e734412011-05-08 15:28:03 -07001659 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001661EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Eric Dumazet079096f2015-10-02 11:43:32 -07001663static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001666 const struct tcphdr *th = tcp_hdr(skb);
1667
Florian Westphalaf9b4732010-06-03 00:43:44 +00001668 if (!th->syn)
Cong Wang461b74c2014-10-15 14:33:22 -07001669 sk = cookie_v4_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670#endif
1671 return sk;
1672}
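/* When the listener's queue overflowed, no request_sock was kept for the
 * SYN; with CONFIG_SYN_COOKIES the returning ACK is handed to
 * cookie_v4_check(), which rebuilds the request from the state encoded
 * in the sequence number and timestamp options.
 */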
1673
Petar Penkov9349d602019-07-29 09:59:14 -07001674u16 tcp_v4_get_syncookie(struct sock *sk, struct iphdr *iph,
1675 struct tcphdr *th, u32 *cookie)
1676{
1677 u16 mss = 0;
1678#ifdef CONFIG_SYN_COOKIES
1679 mss = tcp_get_syncookie_mss(&tcp_request_sock_ops,
1680 &tcp_request_sock_ipv4_ops, sk, th);
1681 if (mss) {
1682 *cookie = __cookie_v4_init_sequence(iph, th, &mss);
1683 tcp_synq_overflow(sk);
1684 }
1685#endif
1686 return mss;
1687}
1688
Brian Vazquezbbd807d2021-02-01 17:41:32 +00001689INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1690 u32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001692 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693 *
1694 * We have a potential double-lock case here, so even when
1695 * doing backlog processing we use the BH locking scheme.
1696 * This is because we cannot sleep with the original spinlock
1697 * held.
1698 */
1699int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1700{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001701 struct sock *rsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001702
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet404e0a82012-07-29 23:20:37 +00001704 struct dst_entry *dst = sk->sk_rx_dst;
1705
Tom Herbertbdeab992011-08-14 19:45:55 +00001706 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001707 sk_mark_napi_id(sk, skb);
Eric Dumazet404e0a82012-07-29 23:20:37 +00001708 if (dst) {
Eric Dumazet0c0a5ef2021-10-25 09:48:16 -07001709 if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
Brian Vazquezbbd807d2021-02-01 17:41:32 +00001710 !INDIRECT_CALL_1(dst->ops->check, ipv4_dst_check,
1711 dst, 0)) {
David S. Miller92101b32012-07-23 16:29:00 -07001712 dst_release(dst);
1713 sk->sk_rx_dst = NULL;
1714 }
1715 }
Yafang Shao3d97d882018-05-29 23:27:31 +08001716 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 return 0;
1718 }
1719
Eric Dumazet12e25e12015-06-03 23:49:21 -07001720 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 goto csum_err;
1722
1723 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001724 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1725
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726 if (!nsk)
1727 goto discard;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 if (nsk != sk) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001729 if (tcp_child_process(sk, nsk, skb)) {
1730 rsk = nsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001732 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 return 0;
1734 }
Eric Dumazetca551582010-06-03 09:03:58 +00001735 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001736 sock_rps_save_rxhash(sk, skb);
Eric Dumazetca551582010-06-03 09:03:58 +00001737
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001738 if (tcp_rcv_state_process(sk, skb)) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001739 rsk = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 return 0;
1743
1744reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001745 tcp_v4_send_reset(rsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746discard:
1747 kfree_skb(skb);
1748 /* Be careful here. If this function gets more complicated and
1749 * gcc suffers from register pressure on the x86, sk (in %ebx)
1750 * might be destroyed here. This current version compiles correctly,
1751 * but you have been warned.
1752 */
1753 return 0;
1754
1755csum_err:
Jakub Kicinski709c0312021-05-14 13:04:25 -07001756 trace_tcp_bad_csum(skb);
Eric Dumazetc10d9312016-04-29 14:16:47 -07001757 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1758 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 goto discard;
1760}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001761EXPORT_SYMBOL(tcp_v4_do_rcv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762
Paolo Abeni74874492017-09-28 15:51:36 +02001763int tcp_v4_early_demux(struct sk_buff *skb)
David S. Miller41063e92012-06-19 21:22:05 -07001764{
David S. Miller41063e92012-06-19 21:22:05 -07001765 const struct iphdr *iph;
1766 const struct tcphdr *th;
1767 struct sock *sk;
David S. Miller41063e92012-06-19 21:22:05 -07001768
David S. Miller41063e92012-06-19 21:22:05 -07001769 if (skb->pkt_type != PACKET_HOST)
Paolo Abeni74874492017-09-28 15:51:36 +02001770 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001771
Eric Dumazet45f00f92012-10-22 21:42:47 +00001772 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
Paolo Abeni74874492017-09-28 15:51:36 +02001773 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001774
1775 iph = ip_hdr(skb);
Eric Dumazet45f00f92012-10-22 21:42:47 +00001776 th = tcp_hdr(skb);
David S. Miller41063e92012-06-19 21:22:05 -07001777
1778 if (th->doff < sizeof(struct tcphdr) / 4)
Paolo Abeni74874492017-09-28 15:51:36 +02001779 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001780
Eric Dumazet45f00f92012-10-22 21:42:47 +00001781 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
David S. Miller41063e92012-06-19 21:22:05 -07001782 iph->saddr, th->source,
Vijay Subramanian7011d082012-06-23 17:38:10 +00001783 iph->daddr, ntohs(th->dest),
David Ahern3fa6f612017-08-07 08:44:17 -07001784 skb->skb_iif, inet_sdif(skb));
David S. Miller41063e92012-06-19 21:22:05 -07001785 if (sk) {
1786 skb->sk = sk;
1787 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001788 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001789 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001790
David S. Miller41063e92012-06-19 21:22:05 -07001791 if (dst)
1792 dst = dst_check(dst, 0);
David S. Miller92101b32012-07-23 16:29:00 -07001793 if (dst &&
Eric Dumazet0c0a5ef2021-10-25 09:48:16 -07001794 sk->sk_rx_dst_ifindex == skb->skb_iif)
David S. Miller92101b32012-07-23 16:29:00 -07001795 skb_dst_set_noref(skb, dst);
David S. Miller41063e92012-06-19 21:22:05 -07001796 }
1797 }
Paolo Abeni74874492017-09-28 15:51:36 +02001798 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001799}
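/* Early demux above lets ip_rcv() find an established socket before any
 * route lookup; when the cached sk->sk_rx_dst is still valid for the
 * ingress interface, the per-packet FIB lookup is skipped entirely.
 */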
1800
Eric Dumazetc9c33212016-08-27 07:37:54 -07001801bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1802{
Eric Dumazet82657922019-10-09 15:21:13 -07001803 u32 limit = READ_ONCE(sk->sk_rcvbuf) + READ_ONCE(sk->sk_sndbuf);
Eric Dumazetb160c282021-01-19 08:49:00 -08001804 u32 tail_gso_size, tail_gso_segs;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001805 struct skb_shared_info *shinfo;
1806 const struct tcphdr *th;
1807 struct tcphdr *thtail;
1808 struct sk_buff *tail;
1809 unsigned int hdrlen;
1810 bool fragstolen;
1811 u32 gso_segs;
Eric Dumazetb160c282021-01-19 08:49:00 -08001812 u32 gso_size;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001813 int delta;
Eric Dumazetc9c33212016-08-27 07:37:54 -07001814
1815 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1816 * we can fix skb->truesize to its real value to avoid future drops.
1817 * This is valid because skb is not yet charged to the socket.
 1818	 * It has been noticed that pure SACK packets were sometimes dropped
 1819	 * (if cooked by drivers without the copybreak feature).
1820 */
Eric Dumazet60b1af32017-01-24 14:57:36 -08001821 skb_condense(skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001822
Eric Dumazetade96282018-11-19 17:45:55 -08001823 skb_dst_drop(skb);
1824
Eric Dumazet4f693b52018-11-27 14:42:03 -08001825 if (unlikely(tcp_checksum_complete(skb))) {
1826 bh_unlock_sock(sk);
Jakub Kicinski709c0312021-05-14 13:04:25 -07001827 trace_tcp_bad_csum(skb);
Eric Dumazet4f693b52018-11-27 14:42:03 -08001828 __TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1829 __TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1830 return true;
1831 }
1832
1833 /* Attempt coalescing to last skb in backlog, even if we are
1834 * above the limits.
1835 * This is okay because skb capacity is limited to MAX_SKB_FRAGS.
1836 */
1837 th = (const struct tcphdr *)skb->data;
1838 hdrlen = th->doff * 4;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001839
1840 tail = sk->sk_backlog.tail;
1841 if (!tail)
1842 goto no_coalesce;
1843 thtail = (struct tcphdr *)tail->data;
1844
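	/* Coalescing is only attempted when the two segments are strictly
	 * contiguous in sequence space, carry identical DSCP bits,
	 * ECN-related flags and TCP options, both have ACK set, and
	 * neither carries SYN, RST or URG; anything else must reach the
	 * stack as its own skb.
	 */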
1845 if (TCP_SKB_CB(tail)->end_seq != TCP_SKB_CB(skb)->seq ||
1846 TCP_SKB_CB(tail)->ip_dsfield != TCP_SKB_CB(skb)->ip_dsfield ||
1847 ((TCP_SKB_CB(tail)->tcp_flags |
Eric Dumazetca2fe292019-04-26 10:10:05 -07001848 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_SYN | TCPHDR_RST | TCPHDR_URG)) ||
1849 !((TCP_SKB_CB(tail)->tcp_flags &
1850 TCP_SKB_CB(skb)->tcp_flags) & TCPHDR_ACK) ||
Eric Dumazet4f693b52018-11-27 14:42:03 -08001851 ((TCP_SKB_CB(tail)->tcp_flags ^
1852 TCP_SKB_CB(skb)->tcp_flags) & (TCPHDR_ECE | TCPHDR_CWR)) ||
1853#ifdef CONFIG_TLS_DEVICE
1854 tail->decrypted != skb->decrypted ||
1855#endif
1856 thtail->doff != th->doff ||
1857 memcmp(thtail + 1, th + 1, hdrlen - sizeof(*th)))
1858 goto no_coalesce;
1859
1860 __skb_pull(skb, hdrlen);
Eric Dumazetb160c282021-01-19 08:49:00 -08001861
1862 shinfo = skb_shinfo(skb);
1863 gso_size = shinfo->gso_size ?: skb->len;
1864 gso_segs = shinfo->gso_segs ?: 1;
1865
1866 shinfo = skb_shinfo(tail);
1867 tail_gso_size = shinfo->gso_size ?: (tail->len - hdrlen);
1868 tail_gso_segs = shinfo->gso_segs ?: 1;
1869
Eric Dumazet4f693b52018-11-27 14:42:03 -08001870 if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
Eric Dumazet4f693b52018-11-27 14:42:03 -08001871 TCP_SKB_CB(tail)->end_seq = TCP_SKB_CB(skb)->end_seq;
1872
Eric Dumazet86bccd02020-10-05 06:48:13 -07001873 if (likely(!before(TCP_SKB_CB(skb)->ack_seq, TCP_SKB_CB(tail)->ack_seq))) {
Eric Dumazet4f693b52018-11-27 14:42:03 -08001874 TCP_SKB_CB(tail)->ack_seq = TCP_SKB_CB(skb)->ack_seq;
Eric Dumazet86bccd02020-10-05 06:48:13 -07001875 thtail->window = th->window;
1876 }
Eric Dumazet4f693b52018-11-27 14:42:03 -08001877
Eric Dumazetca2fe292019-04-26 10:10:05 -07001878 /* We have to update both TCP_SKB_CB(tail)->tcp_flags and
1879 * thtail->fin, so that the fast path in tcp_rcv_established()
1880 * is not entered if we append a packet with a FIN.
1881 * SYN, RST, URG are not present.
1882 * ACK is set on both packets.
1883 * PSH : we do not really care in TCP stack,
1884 * at least for 'GRO' packets.
1885 */
1886 thtail->fin |= th->fin;
Eric Dumazet4f693b52018-11-27 14:42:03 -08001887 TCP_SKB_CB(tail)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags;
1888
1889 if (TCP_SKB_CB(skb)->has_rxtstamp) {
1890 TCP_SKB_CB(tail)->has_rxtstamp = true;
1891 tail->tstamp = skb->tstamp;
1892 skb_hwtstamps(tail)->hwtstamp = skb_hwtstamps(skb)->hwtstamp;
1893 }
1894
1895 /* Not as strict as GRO. We only need to carry mss max value */
Eric Dumazetb160c282021-01-19 08:49:00 -08001896 shinfo->gso_size = max(gso_size, tail_gso_size);
1897 shinfo->gso_segs = min_t(u32, gso_segs + tail_gso_segs, 0xFFFF);
Eric Dumazet4f693b52018-11-27 14:42:03 -08001898
1899 sk->sk_backlog.len += delta;
1900 __NET_INC_STATS(sock_net(sk),
1901 LINUX_MIB_TCPBACKLOGCOALESCE);
1902 kfree_skb_partial(skb, fragstolen);
1903 return false;
1904 }
1905 __skb_push(skb, hdrlen);
1906
1907no_coalesce:
1908 /* Only socket owner can try to collapse/prune rx queues
 1909	/* Only the socket owner can try to collapse/prune rx queues
 1910	 * to reduce memory overhead, so add a little headroom here.
 1911	 * Only a few socket backlogs are likely to be non-empty at once.
1912 limit += 64*1024;
1913
Eric Dumazetc9c33212016-08-27 07:37:54 -07001914 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1915 bh_unlock_sock(sk);
1916 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1917 return true;
1918 }
1919 return false;
1920}
1921EXPORT_SYMBOL(tcp_add_backlog);
1922
Eric Dumazetac6e7802016-11-10 13:12:35 -08001923int tcp_filter(struct sock *sk, struct sk_buff *skb)
1924{
1925 struct tcphdr *th = (struct tcphdr *)skb->data;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001926
Christoph Paaschf2feaef2019-03-11 11:41:05 -07001927 return sk_filter_trim_cap(sk, skb, th->doff * 4);
Eric Dumazetac6e7802016-11-10 13:12:35 -08001928}
1929EXPORT_SYMBOL(tcp_filter);
1930
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001931static void tcp_v4_restore_cb(struct sk_buff *skb)
1932{
1933 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1934 sizeof(struct inet_skb_parm));
1935}
1936
1937static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1938 const struct tcphdr *th)
1939{
 1940	/* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB().
 1941	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1942 */
1943 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1944 sizeof(struct inet_skb_parm));
1945 barrier();
1946
1947 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1948 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1949 skb->len - th->doff * 4);
1950 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1951 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1952 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1953 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1954 TCP_SKB_CB(skb)->sacked = 0;
1955 TCP_SKB_CB(skb)->has_rxtstamp =
1956 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1957}
1958
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959/*
1960 * From tcp_input.c
1961 */
1962
1963int tcp_v4_rcv(struct sk_buff *skb)
1964{
Eric Dumazet3b24d852016-04-01 08:52:17 -07001965 struct net *net = dev_net(skb->dev);
David Ahern3fa6f612017-08-07 08:44:17 -07001966 int sdif = inet_sdif(skb);
David Ahern534322c2019-12-30 14:14:27 -08001967 int dif = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001968 const struct iphdr *iph;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001969 const struct tcphdr *th;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001970 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971 struct sock *sk;
1972 int ret;
1973
1974 if (skb->pkt_type != PACKET_HOST)
1975 goto discard_it;
1976
1977 /* Count it even if it's bad */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001978 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
1980 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1981 goto discard_it;
1982
Eric Dumazetea1627c2016-05-13 09:16:40 -07001983 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984
Eric Dumazetea1627c2016-05-13 09:16:40 -07001985 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986 goto bad_packet;
1987 if (!pskb_may_pull(skb, th->doff * 4))
1988 goto discard_it;
1989
1990 /* An explanation is required here, I think.
1991 * Packet length and doff are validated by header prediction,
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001992	 * provided the case of th->doff==0 is eliminated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 * So, we defer the checks. */
Tom Herberted70fcf2014-05-02 16:29:38 -07001994
1995 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001996 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997
Eric Dumazetea1627c2016-05-13 09:16:40 -07001998 th = (const struct tcphdr *)skb->data;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001999 iph = ip_hdr(skb);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07002000lookup:
Craig Galleka5836362016-02-10 11:50:38 -05002001 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
David Ahern3fa6f612017-08-07 08:44:17 -07002002 th->dest, sdif, &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003 if (!sk)
2004 goto no_tcp_socket;
2005
Eric Dumazetbb134d52010-03-09 05:55:56 +00002006process:
2007 if (sk->sk_state == TCP_TIME_WAIT)
2008 goto do_time_wait;
2009
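	/* TCP_NEW_SYN_RECV means the lookup hit a request socket, i.e. a
	 * 3WHS still in progress: validate MD5 and checksum against the
	 * listener, then let tcp_check_req() either promote the request to
	 * a full socket or report that another CPU stole it.
	 */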
Eric Dumazet079096f2015-10-02 11:43:32 -07002010 if (sk->sk_state == TCP_NEW_SYN_RECV) {
2011 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08002012 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08002013 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07002014
2015 sk = req->rsk_listener;
David Ahern534322c2019-12-30 14:14:27 -08002016 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))) {
Eric Dumazete65c3322016-08-24 08:50:24 -07002017 sk_drops_add(sk, skb);
Eric Dumazet72923552016-02-11 22:50:29 -08002018 reqsk_put(req);
2019 goto discard_it;
2020 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00002021 if (tcp_checksum_complete(skb)) {
2022 reqsk_put(req);
2023 goto csum_error;
2024 }
Eric Dumazet77166822016-02-18 05:39:18 -08002025 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Kuniyuki Iwashimad4f2c862021-06-12 21:32:20 +09002026 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
2027 if (!nsk) {
2028 inet_csk_reqsk_queue_drop_and_put(sk, req);
2029 goto lookup;
2030 }
2031 sk = nsk;
 2032		/* reuseport_migrate_sock() has already taken one sk_refcnt
 2033		 * before returning.
2034 */
2035 } else {
2036 /* We own a reference on the listener, increase it again
2037 * as we might lose it too soon.
2038 */
2039 sock_hold(sk);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07002040 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07002041 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07002042 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002043 if (!tcp_filter(sk, skb)) {
2044 th = (const struct tcphdr *)skb->data;
2045 iph = ip_hdr(skb);
2046 tcp_v4_fill_cb(skb, iph, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08002047 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002048 }
Eric Dumazet079096f2015-10-02 11:43:32 -07002049 if (!nsk) {
2050 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08002051 if (req_stolen) {
2052 /* Another cpu got exclusive access to req
2053 * and created a full blown socket.
2054 * Try to feed this packet to this socket
2055 * instead of discarding it.
2056 */
2057 tcp_v4_restore_cb(skb);
2058 sock_put(sk);
2059 goto lookup;
2060 }
Eric Dumazet77166822016-02-18 05:39:18 -08002061 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07002062 }
2063 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07002064 reqsk_put(req);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002065 tcp_v4_restore_cb(skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07002066 } else if (tcp_child_process(sk, nsk, skb)) {
2067 tcp_v4_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08002068 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07002069 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08002070 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07002071 return 0;
2072 }
2073 }
Eric Dumazet14834c42021-10-25 09:48:23 -07002074
Eric Dumazet020e71a2021-10-25 09:48:24 -07002075 if (static_branch_unlikely(&ip4_min_ttl)) {
2076 /* min_ttl can be changed concurrently from do_ip_setsockopt() */
2077 if (unlikely(iph->ttl < READ_ONCE(inet_sk(sk)->min_ttl))) {
2078 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
2079 goto discard_and_relse;
2080 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00002081 }
Stephen Hemmingerd218d112010-01-11 16:28:01 -08002082
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2084 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04002085
David Ahern534322c2019-12-30 14:14:27 -08002086 if (tcp_v4_inbound_md5_hash(sk, skb, dif, sdif))
Dmitry Popov9ea88a12014-08-07 02:38:22 +04002087 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04002088
Florian Westphal895b5c92019-09-29 20:54:03 +02002089 nf_reset_ct(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
Eric Dumazetac6e7802016-11-10 13:12:35 -08002091 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08002093 th = (const struct tcphdr *)skb->data;
2094 iph = ip_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002095 tcp_v4_fill_cb(skb, iph, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096
2097 skb->dev = NULL;
2098
Eric Dumazete994b2f2015-10-02 11:43:39 -07002099 if (sk->sk_state == TCP_LISTEN) {
2100 ret = tcp_v4_do_rcv(sk, skb);
2101 goto put_and_return;
2102 }
2103
2104 sk_incoming_cpu_update(sk);
2105
Ingo Molnarc6366182006-07-03 00:25:13 -07002106 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07002107 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 ret = 0;
2109 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02002110 ret = tcp_v4_do_rcv(sk, skb);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07002111 } else {
2112 if (tcp_add_backlog(sk, skb))
2113 goto discard_and_relse;
Zhu Yi6b03a532010-03-04 18:01:41 +00002114 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 bh_unlock_sock(sk);
2116
Eric Dumazete994b2f2015-10-02 11:43:39 -07002117put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07002118 if (refcounted)
2119 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
2121 return ret;
2122
2123no_tcp_socket:
2124 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2125 goto discard_it;
2126
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002127 tcp_v4_fill_cb(skb, iph, th);
2128
Eric Dumazet12e25e12015-06-03 23:49:21 -07002129 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00002130csum_error:
Jakub Kicinski709c0312021-05-14 13:04:25 -07002131 trace_tcp_bad_csum(skb);
Eric Dumazet90bbcc62016-04-27 16:44:32 -07002132 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07002134 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002136 tcp_v4_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137 }
2138
2139discard_it:
2140 /* Discard frame. */
2141 kfree_skb(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002142 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
2144discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07002145 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002146 if (refcounted)
2147 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 goto discard_it;
2149
2150do_time_wait:
2151 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07002152 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 goto discard_it;
2154 }
2155
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002156 tcp_v4_fill_cb(skb, iph, th);
2157
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00002158 if (tcp_checksum_complete(skb)) {
2159 inet_twsk_put(inet_twsk(sk));
2160 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002161 }
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07002162 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 case TCP_TW_SYN: {
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09002164 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
Craig Galleka5836362016-02-10 11:50:38 -05002165 &tcp_hashinfo, skb,
2166 __tcp_hdrlen(th),
Tom Herbertda5e3632013-01-22 09:50:24 +00002167 iph->saddr, th->source,
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07002168 iph->daddr, th->dest,
David Ahern3fa6f612017-08-07 08:44:17 -07002169 inet_iif(skb),
2170 sdif);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 if (sk2) {
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07002172 inet_twsk_deschedule_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173 sk = sk2;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08002174 tcp_v4_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07002175 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 goto process;
2177 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 }
Gustavo A. R. Silvafcfd6df2017-10-16 15:48:55 -05002179 /* to ACK */
Joe Perchesa8eceea2020-03-12 15:50:22 -07002180 fallthrough;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002181 case TCP_TW_ACK:
2182 tcp_v4_timewait_ack(sk, skb);
2183 break;
2184 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01002185 tcp_v4_send_reset(sk, skb);
2186 inet_twsk_deschedule_put(inet_twsk(sk));
2187 goto discard_it;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 case TCP_TW_SUCCESS:;
2189 }
2190 goto discard_it;
2191}
2192
David S. Millerccb7c412010-12-01 18:09:13 -08002193static struct timewait_sock_ops tcp_timewait_sock_ops = {
2194 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2195 .twsk_unique = tcp_twsk_unique,
2196 .twsk_destructor= tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08002197};
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
Eric Dumazet63d02d12012-08-09 14:11:00 +00002199void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
Eric Dumazet5d299f32012-08-06 05:09:33 +00002200{
2201 struct dst_entry *dst = skb_dst(skb);
2202
Eric Dumazet5037e9e2015-12-14 14:08:53 -08002203 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -07002204 sk->sk_rx_dst = dst;
Eric Dumazet0c0a5ef2021-10-25 09:48:16 -07002205 sk->sk_rx_dst_ifindex = skb->skb_iif;
Eric Dumazetca777ef2014-09-08 08:06:07 -07002206 }
Eric Dumazet5d299f32012-08-06 05:09:33 +00002207}
Eric Dumazet63d02d12012-08-09 14:11:00 +00002208EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup		= tcp_v4_md5_lookup,
	.calc_md5_hash		= tcp_v4_md5_hash_skb,
	.md5_parse		= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

void tcp_v4_destroy_sock(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	trace_tcp_destroy_sock(sk);

	tcp_clear_xmit_timers(sk);

	tcp_cleanup_congestion_control(sk);

	tcp_cleanup_ulp(sk);

	/* Clean up the write buffer. */
	tcp_write_queue_purge(sk);

	/* Check if we want to disable active TFO */
	tcp_fastopen_active_disable_ofo_check(sk);

	/* Cleans up our, hopefully empty, out_of_order_queue. */
	skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
	/* Clean up the MD5 key list, if any */
	if (tp->md5sig_info) {
		tcp_clear_md5_list(sk);
		kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
		tp->md5sig_info = NULL;
	}
#endif

	/* Clean up a referenced TCP bind bucket. */
	if (inet_csk(sk)->icsk_bind_hash)
		inet_put_port(sk);

	BUG_ON(rcu_access_pointer(tp->fastopen_rsk));

	/* If the socket was aborted during the connect operation */
	tcp_free_fastopen_req(tp);
	tcp_fastopen_destroy_cipher(sk);
	tcp_saved_syn_free(tp);

	sk_sockets_allocated_dec(sk);
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

static unsigned short seq_file_family(const struct seq_file *seq);

static bool seq_sk_match(struct seq_file *seq, const struct sock *sk)
{
	unsigned short family = seq_file_family(seq);

	/* AF_UNSPEC is used as a match-all */
	return ((family == AF_UNSPEC || family == sk->sk_family) &&
		net_eq(sock_net(sk), seq_file_net(seq)));
}

/* Find a non-empty bucket (starting from st->bucket)
 * and return the first sk from it.
 */
static void *listening_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.lhash2_mask; st->bucket++) {
		struct inet_listen_hashbucket *ilb2;
		struct inet_connection_sock *icsk;
		struct sock *sk;

		ilb2 = &tcp_hashinfo.lhash2[st->bucket];
		if (hlist_empty(&ilb2->head))
			continue;

		spin_lock(&ilb2->lock);
		inet_lhash2_for_each_icsk(icsk, &ilb2->head) {
			sk = (struct sock *)icsk;
			if (seq_sk_match(seq, sk))
				return sk;
		}
		spin_unlock(&ilb2->lock);
	}

	return NULL;
}

/* Find the next sk of "cur" within the same bucket (i.e. st->bucket).
 * If "cur" is the last one in the st->bucket,
 * call listening_get_first() to return the first sk of the next
 * non-empty bucket.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
	struct tcp_iter_state *st = seq->private;
	struct inet_listen_hashbucket *ilb2;
	struct inet_connection_sock *icsk;
	struct sock *sk = cur;

	++st->num;
	++st->offset;

	icsk = inet_csk(sk);
	inet_lhash2_for_each_icsk_continue(icsk) {
		sk = (struct sock *)icsk;
		if (seq_sk_match(seq, sk))
			return sk;
	}

	ilb2 = &tcp_hashinfo.lhash2[st->bucket];
	spin_unlock(&ilb2->lock);
	++st->bucket;
	return listening_get_first(seq);
}

static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_first(seq);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(const struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (seq_sk_match(seq, sk))
				return sk;
		}
		spin_unlock_bh(lock);
	}

	return NULL;
}

static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;

	++st->num;
	++st->offset;

	sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (seq_sk_match(seq, sk))
			return sk;
	}

	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
	++st->bucket;
	return established_get_first(seq);
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}

static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int bucket = st->bucket;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket > tcp_hashinfo.lhash2_mask)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_first(seq);
		while (offset-- && rc && bucket == st->bucket)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		fallthrough;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc && bucket == st->bucket)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
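
/*
 * Resume example with illustrative numbers: if the previous read()
 * stopped at the third socket of listening bucket 17, the saved state
 * is st->bucket == 17 and st->offset == 3, so the walk above re-enters
 * that bucket and drops three entries before handing out the next one.
 * Sockets added or removed in the meantime can shift what "third"
 * means; the interface only promises a best-effort resume, not a
 * stable snapshot.
 */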

void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_start);

void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
EXPORT_SYMBOL(tcp_seq_next);

void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock(&tcp_hashinfo.lhash2[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
EXPORT_SYMBOL(tcp_seq_stop);
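
/*
 * The three exported helpers above implement the usual seq_file
 * contract.  A rough sketch of how the seq_file core drives them for
 * one traversal (simplified from fs/seq_file.c, not the literal code):
 *
 *	p = ops->start(seq, &pos);
 *	while (p) {
 *		ops->show(seq, p);
 *		p = ops->next(seq, p, &pos);
 *	}
 *	ops->stop(seq, p);
 *
 * Note that start()/stop() bracket every chunk of output, which is why
 * tcp_seq_stop() must drop whichever bucket lock is still held.
 */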

static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sk->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		READ_ONCE(tp->write_seq) - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}
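
/*
 * Example of a resulting /proc/net/tcp record (a single line in the
 * real output, wrapped here; the values are illustrative -- a listener
 * on 127.0.0.1:8080 owned by uid 1000):
 *
 *    0: 0100007F:1F90 00000000:0000 0A 00000000:00000000 00:00000000
 *	 00000000  1000        0 12345 1 0000000000000000 100 0 0 10 0
 *
 * The address is the raw __be32 printed in hex (hence the reversed
 * byte order on little-endian hosts), the port is ntohs()'ed, and
 * st 0A is TCP_LISTEN.
 */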

#ifdef CONFIG_BPF_SYSCALL
struct bpf_tcp_iter_state {
	struct tcp_iter_state state;
	unsigned int cur_sk;
	unsigned int end_sk;
	unsigned int max_sk;
	struct sock **batch;
	bool st_bucket_done;
};

struct bpf_iter__tcp {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct sock_common *, sk_common);
	uid_t uid __aligned(8);
};

static int tcp_prog_seq_show(struct bpf_prog *prog, struct bpf_iter_meta *meta,
			     struct sock_common *sk_common, uid_t uid)
{
	struct bpf_iter__tcp ctx;

	meta->seq_num--;  /* skip SEQ_START_TOKEN */
	ctx.meta = meta;
	ctx.sk_common = sk_common;
	ctx.uid = uid;
	return bpf_iter_run_prog(prog, &ctx);
}

static void bpf_iter_tcp_put_batch(struct bpf_tcp_iter_state *iter)
{
	while (iter->cur_sk < iter->end_sk)
		sock_put(iter->batch[iter->cur_sk++]);
}

static int bpf_iter_tcp_realloc_batch(struct bpf_tcp_iter_state *iter,
				      unsigned int new_batch_sz)
{
	struct sock **new_batch;

	new_batch = kvmalloc(sizeof(*new_batch) * new_batch_sz,
			     GFP_USER | __GFP_NOWARN);
	if (!new_batch)
		return -ENOMEM;

	bpf_iter_tcp_put_batch(iter);
	kvfree(iter->batch);
	iter->batch = new_batch;
	iter->max_sk = new_batch_sz;

	return 0;
}
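
/*
 * Sizing note (illustrative numbers): the batch starts at
 * INIT_BATCH_SZ (16, see below) and is regrown to 3/2 of the bucket's
 * population when a bucket does not fit, e.g. a bucket holding 20
 * matching sockets makes the first pass capture 16, report
 * expected == 20, and retry with a 30-entry batch.
 */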

static unsigned int bpf_iter_tcp_listening_batch(struct seq_file *seq,
						 struct sock *start_sk)
{
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	struct inet_connection_sock *icsk;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	icsk = inet_csk(start_sk);
	inet_lhash2_for_each_icsk_continue(icsk) {
		sk = (struct sock *)icsk;
		if (seq_sk_match(seq, sk)) {
			if (iter->end_sk < iter->max_sk) {
				sock_hold(sk);
				iter->batch[iter->end_sk++] = sk;
			}
			expected++;
		}
	}
	spin_unlock(&tcp_hashinfo.lhash2[st->bucket].lock);

	return expected;
}

static unsigned int bpf_iter_tcp_established_batch(struct seq_file *seq,
						   struct sock *start_sk)
{
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	struct hlist_nulls_node *node;
	unsigned int expected = 1;
	struct sock *sk;

	sock_hold(start_sk);
	iter->batch[iter->end_sk++] = start_sk;

	sk = sk_nulls_next(start_sk);
	sk_nulls_for_each_from(sk, node) {
		if (seq_sk_match(seq, sk)) {
			if (iter->end_sk < iter->max_sk) {
				sock_hold(sk);
				iter->batch[iter->end_sk++] = sk;
			}
			expected++;
		}
	}
	spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));

	return expected;
}

static struct sock *bpf_iter_tcp_batch(struct seq_file *seq)
{
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	unsigned int expected;
	bool resized = false;
	struct sock *sk;

	/* The st->bucket is done.  Directly advance to the next bucket
	 * instead of having tcp_seek_last_pos() skip the sockets of the
	 * current bucket one by one only to find out it has to advance
	 * to the next bucket anyway.
	 */
	if (iter->st_bucket_done) {
		st->offset = 0;
		st->bucket++;
		if (st->state == TCP_SEQ_STATE_LISTENING &&
		    st->bucket > tcp_hashinfo.lhash2_mask) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
		}
	}

again:
	/* Get a new batch */
	iter->cur_sk = 0;
	iter->end_sk = 0;
	iter->st_bucket_done = false;

	sk = tcp_seek_last_pos(seq);
	if (!sk)
		return NULL; /* Done */

	if (st->state == TCP_SEQ_STATE_LISTENING)
		expected = bpf_iter_tcp_listening_batch(seq, sk);
	else
		expected = bpf_iter_tcp_established_batch(seq, sk);

	if (iter->end_sk == expected) {
		iter->st_bucket_done = true;
		return sk;
	}

	if (!resized && !bpf_iter_tcp_realloc_batch(iter, expected * 3 / 2)) {
		resized = true;
		goto again;
	}

	return sk;
}
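
/*
 * End-to-end sketch of one batch cycle, assuming a bucket with more
 * matching sockets than the current batch can hold (a hypothetical
 * walk, not literal code):
 *
 *	sk = bpf_iter_tcp_batch(seq);	// grabs the whole bucket
 *	while (sk) {			// each next() stays lockless,
 *		show(sk);		// consuming iter->batch[] until
 *		sk = seq_next(...);	// the batch is drained, then
 *	}				// batching the next bucket
 *
 * Holding every sk via sock_hold() lets the bucket lock be dropped
 * before the bpf prog runs, so the prog may take the socket lock
 * (see bpf_iter_tcp_seq_show() below).
 */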

static void *bpf_iter_tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	/* bpf iter does not support lseek, so it always
	 * continues from where it was stop()-ped.
	 */
	if (*pos)
		return bpf_iter_tcp_batch(seq);

	return SEQ_START_TOKEN;
}

static void *bpf_iter_tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_tcp_iter_state *iter = seq->private;
	struct tcp_iter_state *st = &iter->state;
	struct sock *sk;

	/* Whenever seq_next() is called, the sk at iter->cur_sk is
	 * done with seq_show(), so advance to the next sk in
	 * the batch.
	 */
	if (iter->cur_sk < iter->end_sk) {
		/* Keeping st->num consistent in tcp_iter_state.
		 * bpf_iter_tcp does not use st->num.
		 * meta.seq_num is used instead.
		 */
		st->num++;
		/* Move st->offset to the next sk in the bucket such that
		 * the future start() will resume at st->offset in
		 * st->bucket.  See tcp_seek_last_pos().
		 */
		st->offset++;
		sock_put(iter->batch[iter->cur_sk++]);
	}

	if (iter->cur_sk < iter->end_sk)
		sk = iter->batch[iter->cur_sk];
	else
		sk = bpf_iter_tcp_batch(seq);

	++*pos;
	/* Keeping st->last_pos consistent in tcp_iter_state.
	 * bpf iter does not do lseek, so st->last_pos always equals *pos.
	 */
	st->last_pos = *pos;
	return sk;
}

static int bpf_iter_tcp_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	struct sock *sk = v;
	bool slow;
	uid_t uid;
	int ret;

	if (v == SEQ_START_TOKEN)
		return 0;

	if (sk_fullsock(sk))
		slow = lock_sock_fast(sk);

	if (unlikely(sk_unhashed(sk))) {
		ret = SEQ_SKIP;
		goto unlock;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		uid = 0;
	} else if (sk->sk_state == TCP_NEW_SYN_RECV) {
		const struct request_sock *req = v;

		uid = from_kuid_munged(seq_user_ns(seq),
				       sock_i_uid(req->rsk_listener));
	} else {
		uid = from_kuid_munged(seq_user_ns(seq), sock_i_uid(sk));
	}

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, false);
	ret = tcp_prog_seq_show(prog, &meta, v, uid);

unlock:
	if (sk_fullsock(sk))
		unlock_sock_fast(sk, slow);
	return ret;
}

static void bpf_iter_tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct bpf_tcp_iter_state *iter = seq->private;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	if (!v) {
		meta.seq = seq;
		prog = bpf_iter_get_info(&meta, true);
		if (prog)
			(void)tcp_prog_seq_show(prog, &meta, v, 0);
	}

	if (iter->cur_sk < iter->end_sk) {
		bpf_iter_tcp_put_batch(iter);
		iter->st_bucket_done = false;
	}
}

static const struct seq_operations bpf_iter_tcp_seq_ops = {
	.show		= bpf_iter_tcp_seq_show,
	.start		= bpf_iter_tcp_seq_start,
	.next		= bpf_iter_tcp_seq_next,
	.stop		= bpf_iter_tcp_seq_stop,
};
#endif

static unsigned short seq_file_family(const struct seq_file *seq)
{
	const struct tcp_seq_afinfo *afinfo;

#ifdef CONFIG_BPF_SYSCALL
	/* Iterated from bpf_iter.  Let the bpf prog do the filtering instead. */
	if (seq->op == &bpf_iter_tcp_seq_ops)
		return AF_UNSPEC;
#endif

	/* Iterated from proc fs */
	afinfo = PDE_DATA(file_inode(seq->file));
	return afinfo->family;
}

static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_net_data("tcp", 0444, net->proc_net, &tcp4_seq_ops,
			sizeof(struct tcp_iter_state), &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

/* @wake is one when sk_stream_write_space() calls us.
 * This sends EPOLLOUT only if notsent_bytes is below half the limit.
 * This mimics the strategy used in sock_def_write_space().
 */
bool tcp_stream_memory_free(const struct sock *sk, int wake)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent_bytes = READ_ONCE(tp->write_seq) -
			    READ_ONCE(tp->snd_nxt);

	return (notsent_bytes << wake) < tcp_notsent_lowat(tp);
}
EXPORT_SYMBOL(tcp_stream_memory_free);
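
/*
 * Worked example with illustrative numbers: with tcp_notsent_lowat set
 * to 128 KB and 70 KB not yet sent, a direct poll (wake == 0) still
 * reports the socket writable (70 KB < 128 KB), but the wakeup path
 * (wake == 1) stays quiet because 70 KB << 1 == 140 KB >= 128 KB.
 * The writer is only woken once notsent_bytes drops below half the
 * limit, which cuts the wakeup rate for bulk senders.
 */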

struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= tcp_bpf_update_proto,
#endif
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	if (net->ipv4.tcp_congestion_control)
		bpf_module_put(net->ipv4.tcp_congestion_control,
			       net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

		/* Please enforce IP_DF and IPID==0 for RST and
		 * ACK sent in SYN-RECV and TIME-WAIT state.
		 */
		inet_sk(sk)->pmtudisc = IP_PMTUDISC_DO;

		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_min_snd_mss = TCP_MIN_SND_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
	net->ipv4.sysctl_tcp_mtu_probe_floor = TCP_MIN_SND_MSS;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 2;
	net->ipv4.sysctl_tcp_no_ssthresh_metrics_save = 1;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 128);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior.  */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of 16 TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 16 * 65536;
	/* RFC 5961 challenge ACK rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_comp_sack_delay_ns = NSEC_PER_MSEC;
	net->ipv4.sysctl_tcp_comp_sack_slack_ns = 100 * NSEC_PER_USEC;
	net->ipv4.sysctl_tcp_comp_sack_nr = 44;
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 0;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    bpf_try_module_get(init_net.ipv4.tcp_congestion_control,
			       init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
DEFINE_BPF_ITER_FUNC(tcp, struct bpf_iter_meta *meta,
		     struct sock_common *sk_common, uid_t uid)

#define INIT_BATCH_SZ 16

static int bpf_iter_init_tcp(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct bpf_tcp_iter_state *iter = priv_data;
	int err;

	err = bpf_iter_init_seq_net(priv_data, aux);
	if (err)
		return err;

	err = bpf_iter_tcp_realloc_batch(iter, INIT_BATCH_SZ);
	if (err) {
		bpf_iter_fini_seq_net(priv_data);
		return err;
	}

	return 0;
}

static void bpf_iter_fini_tcp(void *priv_data)
{
	struct bpf_tcp_iter_state *iter = priv_data;

	bpf_iter_fini_seq_net(priv_data);
	kvfree(iter->batch);
}

static const struct bpf_iter_seq_info tcp_seq_info = {
	.seq_ops		= &bpf_iter_tcp_seq_ops,
	.init_seq_private	= bpf_iter_init_tcp,
	.fini_seq_private	= bpf_iter_fini_tcp,
	.seq_priv_size		= sizeof(struct bpf_tcp_iter_state),
};

static const struct bpf_func_proto *
bpf_iter_tcp_get_func_proto(enum bpf_func_id func_id,
			    const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_setsockopt:
		return &bpf_sk_setsockopt_proto;
	case BPF_FUNC_getsockopt:
		return &bpf_sk_getsockopt_proto;
	default:
		return NULL;
	}
}
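
/*
 * Sketch of a BPF iterator program that relies on the two helpers
 * exposed above; the section name and context layout follow the "tcp"
 * target registered below, while the TCP_CONGESTION rewrite itself is
 * only an illustration:
 *
 *	SEC("iter/tcp")
 *	int set_cc(struct bpf_iter__tcp *ctx)
 *	{
 *		struct sock_common *skc = ctx->sk_common;
 *		char cc[] = "cubic";
 *
 *		if (!skc)
 *			return 0;
 *		return bpf_setsockopt((void *)skc, SOL_TCP, TCP_CONGESTION,
 *				      cc, sizeof(cc));
 *	}
 */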

static struct bpf_iter_reg tcp_reg_info = {
	.target			= "tcp",
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__tcp, sk_common),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.get_func_proto		= bpf_iter_tcp_get_func_proto,
	.seq_info		= &tcp_seq_info,
};

static void __init bpf_iter_register(void)
{
	tcp_reg_info.ctx_arg_info[0].btf_id = btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON];
	if (bpf_iter_reg_target(&tcp_reg_info))
		pr_warn("Warning: could not register bpf iterator tcp\n");
}

#endif

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");

#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_register();
#endif
}