/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *
 *		code split from:
 *		linux/ipv4/tcp.c
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante:		ip_dynaddr bits
 *		Andi Kleen:		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */

#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/inetdevice.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>

#include <trace/events/tcp.h>

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);

static u32 tcp_v4_init_seq(const struct sk_buff *skb)
{
	return secure_tcp_seq(ip_hdr(skb)->daddr,
			      ip_hdr(skb)->saddr,
			      tcp_hdr(skb)->dest,
			      tcp_hdr(skb)->source);
}

static u32 tcp_v4_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcp_ts_off(net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr);
}
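
/* Background note (illustrative, not from the original source): both helpers
 * above call into net/core/secure_seq.c, which follows the RFC 6528 scheme,
 * roughly ISN = M + F(saddr, daddr, sport, dport, secret), where M is a
 * fine-grained clock and F is a keyed hash (siphash in this kernel). That
 * makes initial sequence numbers hard to predict off-path while still
 * advancing monotonically for a given 4-tuple.
 */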

int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint of data integrity.
	   Even without PAWS it is safe provided sequence spaces do not
	   overlap, i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only the timestamp
	   cache is held not per host, but per port pair, and the TW
	   bucket is used as state holder.

	   If the TW bucket has already been destroyed we fall back to
	   VJ's scheme and use the initial timestamp retrieved from the
	   peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse &&
		      get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
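
/* Illustrative note: the reuse above is what makes
 *
 *	sysctl -w net.ipv4.tcp_tw_reuse=1
 *
 * effective for outgoing connections. A client cycling quickly through the
 * same (saddr, daddr, dport) tuple may claim a TIME-WAIT port after about
 * one second instead of waiting the full 2*MSL, because the new write_seq
 * is forced past the old tw_snd_nxt and PAWS timestamps disambiguate any
 * stray old segments.
 */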

static int tcp_v4_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v4_connect() and intended to
	 * prevent the BPF program called below from accessing bytes that
	 * are outside the bound specified by the user in addr_len.
	 */
	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr);
}
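
/* Illustrative sketch (not part of this file): the hook above runs any BPF
 * program attached with attach type BPF_CGROUP_INET4_CONNECT, loaded from
 * userspace roughly like the following (cgroup path is hypothetical):
 *
 *	prog_fd = ...;  // a BPF_PROG_TYPE_CGROUP_SOCK_ADDR program
 *	cgroup_fd = open("/sys/fs/cgroup/unified/app", O_RDONLY);
 *	bpf_prog_attach(prog_fd, cgroup_fd, BPF_CGROUP_INET4_CONNECT, 0);
 *
 * Such a program sees struct bpf_sock_addr and may rewrite the destination
 * before the connect proper proceeds.
 */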
157
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158/* This will initiate an outgoing connection. */
159int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
160{
David S. Miller2d7192d2011-04-26 13:28:44 -0700161 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 struct inet_sock *inet = inet_sk(sk);
163 struct tcp_sock *tp = tcp_sk(sk);
David S. Millerdca8b082011-02-24 13:38:12 -0800164 __be16 orig_sport, orig_dport;
Al Virobada8ad2006-09-26 21:27:15 -0700165 __be32 daddr, nexthop;
David S. Millerda905bd2011-05-06 16:11:19 -0700166 struct flowi4 *fl4;
David S. Miller2d7192d2011-04-26 13:28:44 -0700167 struct rtable *rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168 int err;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000169 struct ip_options_rcu *inet_opt;
Haishuang Yan1946e672016-12-28 17:52:32 +0800170 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171
172 if (addr_len < sizeof(struct sockaddr_in))
173 return -EINVAL;
174
175 if (usin->sin_family != AF_INET)
176 return -EAFNOSUPPORT;
177
178 nexthop = daddr = usin->sin_addr.s_addr;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000179 inet_opt = rcu_dereference_protected(inet->inet_opt,
Hannes Frederic Sowa1e1d04e2016-04-05 17:10:15 +0200180 lockdep_sock_is_held(sk));
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000181 if (inet_opt && inet_opt->opt.srr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182 if (!daddr)
183 return -EINVAL;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000184 nexthop = inet_opt->opt.faddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185 }
186
David S. Millerdca8b082011-02-24 13:38:12 -0800187 orig_sport = inet->inet_sport;
188 orig_dport = usin->sin_port;
David S. Millerda905bd2011-05-06 16:11:19 -0700189 fl4 = &inet->cork.fl.u.ip4;
190 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
David S. Millerb23dd4f2011-03-02 14:31:35 -0800191 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
192 IPPROTO_TCP,
Steffen Klassert0e0d44a2013-08-28 08:04:14 +0200193 orig_sport, orig_dport, sk);
David S. Millerb23dd4f2011-03-02 14:31:35 -0800194 if (IS_ERR(rt)) {
195 err = PTR_ERR(rt);
196 if (err == -ENETUNREACH)
Eric Dumazetf1d8cba2013-11-28 09:51:22 -0800197 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
David S. Millerb23dd4f2011-03-02 14:31:35 -0800198 return err;
Wei Dong584bdf82007-05-31 22:49:28 -0700199 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700200
201 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
202 ip_rt_put(rt);
203 return -ENETUNREACH;
204 }
205
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000206 if (!inet_opt || !inet_opt->opt.srr)
David S. Millerda905bd2011-05-06 16:11:19 -0700207 daddr = fl4->daddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700208
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000209 if (!inet->inet_saddr)
David S. Millerda905bd2011-05-06 16:11:19 -0700210 inet->inet_saddr = fl4->saddr;
Eric Dumazetd1e559d2015-03-18 14:05:35 -0700211 sk_rcv_saddr_set(sk, inet->inet_saddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000213 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 /* Reset inherited state */
215 tp->rx_opt.ts_recent = 0;
216 tp->rx_opt.ts_recent_stamp = 0;
Pavel Emelyanovee995282012-04-19 03:40:39 +0000217 if (likely(!tp->repair))
218 tp->write_seq = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 }
220
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000221 inet->inet_dport = usin->sin_port;
Eric Dumazetd1e559d2015-03-18 14:05:35 -0700222 sk_daddr_set(sk, daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800224 inet_csk(sk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +0000225 if (inet_opt)
226 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
William Allen Simpsonbee7ca92009-11-10 09:51:18 +0000228 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229
230 /* Socket identity is still unknown (sport may be zero).
231 * However we set state to SYN-SENT and not releasing socket
232 * lock select source port, enter ourselves into the hash tables and
233 * complete initialization after this.
234 */
235 tcp_set_state(sk, TCP_SYN_SENT);
Haishuang Yan1946e672016-12-28 17:52:32 +0800236 err = inet_hash_connect(tcp_death_row, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 if (err)
238 goto failure;
239
Tom Herbert877d1f62015-07-28 16:02:05 -0700240 sk_set_txhash(sk);
Sathya Perla9e7ceb02014-10-22 21:42:01 +0530241
David S. Millerda905bd2011-05-06 16:11:19 -0700242 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
David S. Millerb23dd4f2011-03-02 14:31:35 -0800243 inet->inet_sport, inet->inet_dport, sk);
244 if (IS_ERR(rt)) {
245 err = PTR_ERR(rt);
246 rt = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247 goto failure;
David S. Millerb23dd4f2011-03-02 14:31:35 -0800248 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249 /* OK, now commit destination to socket. */
Herbert Xubcd76112006-06-30 13:36:35 -0700250 sk->sk_gso_type = SKB_GSO_TCPV4;
Changli Gaod8d1f302010-06-10 23:31:35 -0700251 sk_setup_caps(sk, &rt->dst);
Wei Wang19f6d3f2017-01-23 10:59:22 -0800252 rt = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700253
Alexey Kodanev00355fa2017-02-22 13:23:55 +0300254 if (likely(!tp->repair)) {
Alexey Kodanev00355fa2017-02-22 13:23:55 +0300255 if (!tp->write_seq)
Eric Dumazet84b114b2017-05-05 06:56:54 -0700256 tp->write_seq = secure_tcp_seq(inet->inet_saddr,
257 inet->inet_daddr,
258 inet->inet_sport,
259 usin->sin_port);
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700260 tp->tsoffset = secure_tcp_ts_off(sock_net(sk),
261 inet->inet_saddr,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700262 inet->inet_daddr);
Alexey Kodanev00355fa2017-02-22 13:23:55 +0300263 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700264
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000265 inet->inet_id = tp->write_seq ^ jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266
Wei Wang19f6d3f2017-01-23 10:59:22 -0800267 if (tcp_fastopen_defer_connect(sk, &err))
268 return err;
269 if (err)
270 goto failure;
271
Andrey Vagin2b916472012-11-22 01:13:58 +0000272 err = tcp_connect(sk);
Pavel Emelyanovee995282012-04-19 03:40:39 +0000273
Linus Torvalds1da177e2005-04-16 15:20:36 -0700274 if (err)
275 goto failure;
276
277 return 0;
278
279failure:
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -0200280 /*
281 * This unhashes the socket and releases the local port,
282 * if necessary.
283 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284 tcp_set_state(sk, TCP_CLOSE);
285 ip_rt_put(rt);
286 sk->sk_route_caps = 0;
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000287 inet->inet_dport = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288 return err;
289}
Eric Dumazet4bc2f182010-07-09 21:22:10 +0000290EXPORT_SYMBOL(tcp_v4_connect);
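
/* Illustrative usage (userspace, not kernel code): tcp_v4_connect() is what
 * a blocking connect() on an AF_INET stream socket ultimately drives.
 * Assuming a listener at 192.0.2.1:8080:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = { .sin_family = AF_INET,
 *				   .sin_port = htons(8080) };
 *	inet_pton(AF_INET, "192.0.2.1", &sin.sin_addr);
 *	connect(fd, (struct sockaddr *)&sin, sizeof(sin));
 *
 * The -EINVAL and -EAFNOSUPPORT checks at the top of tcp_v4_connect() are
 * the kernel-side validation of exactly these arguments.
 */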

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if the socket was owned by the
 * user at the time tcp_v4_err() was called to handle the ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;
	mtu = tcp_sk(sk)->mtu_info;
	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember the soft error
	 * in case this connection will not be able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    ip_sk_accept_pmtu(sk) &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
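
/* Illustrative note on the deferral mentioned above, as this kernel appears
 * to arrange it: tcp_v4_err() stores the new MTU in tp->mtu_info and sets
 * the TCP_MTU_REDUCED_DEFERRED bit in sk->sk_tsq_flags (see below); when
 * the owner releases the socket, tcp_release_cb() notices the bit and
 * invokes this callback in a safe context. Details differ across releases.
 */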

static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}


/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
	struct request_sock *req = inet_reqsk(sk);
	struct net *net = sock_net(sk);

	/* ICMPs are not backlogged, hence we cannot get
	 * an established socket here.
	 */
	if (seq != tcp_rsk(req)->snt_isn) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
	} else if (abort) {
		/*
		 * Still in SYN_RECV, just remove it silently.
		 * There is no good way to pass the error to the newly
		 * created socket, and POSIX does not want network
		 * errors returned from accept().
		 */
		inet_csk_reqsk_queue_drop(req->rsk_listener, req);
		tcp_listendrop(req->rsk_listener);
	}
	reqsk_put(req);
}
EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 *
 */

void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
	const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
	struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
	struct inet_connection_sock *icsk;
	struct tcp_sock *tp;
	struct inet_sock *inet;
	const int type = icmp_hdr(icmp_skb)->type;
	const int code = icmp_hdr(icmp_skb)->code;
	struct sock *sk;
	struct sk_buff *skb;
	struct request_sock *fastopen;
	u32 seq, snd_una;
	s32 remaining;
	u32 delta_us;
	int err;
	struct net *net = dev_net(icmp_skb->dev);

	sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
				       th->dest, iph->saddr, ntohs(th->source),
				       inet_iif(icmp_skb), 0);
	if (!sk) {
		__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
		return;
	}
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq,
				   type == ICMP_PARAMETERPROB ||
				   type == ICMP_TIME_EXCEEDED ||
				   (type == ICMP_DEST_UNREACH &&
				    (code == ICMP_NET_UNREACH ||
				     code == ICMP_HOST_UNREACH)));

	bh_lock_sock(sk);
	/* If too many ICMPs get dropped on busy
	 * servers this needs to be solved differently.
	 * We do take care of PMTU discovery (RFC1191) special case :
	 * we can receive locally generated ICMP messages while socket is held.
	 */
	if (sock_owned_by_user(sk)) {
		if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
			__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
	}
	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	icsk = inet_csk(sk);
	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	switch (type) {
	case ICMP_REDIRECT:
		if (!sock_owned_by_user(sk))
			do_redirect(icmp_skb, sk);
		goto out;
	case ICMP_SOURCE_QUENCH:
		/* Just silently ignore these. */
		goto out;
	case ICMP_PARAMETERPROB:
		err = EPROTO;
		break;
	case ICMP_DEST_UNREACH:
		if (code > NR_ICMP_UNREACH)
			goto out;

		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
			/* We are not interested in TCP_LISTEN and open_requests
			 * (SYN-ACKs sent out by Linux are always <576 bytes, so
			 * they should go through unfragmented).
			 */
			if (sk->sk_state == TCP_LISTEN)
				goto out;

			tp->mtu_info = info;
			if (!sock_owned_by_user(sk)) {
				tcp_v4_mtu_reduced(sk);
			} else {
				if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags))
					sock_hold(sk);
			}
			goto out;
		}

		err = icmp_err_convert[code].errno;
		/* check if icmp_skb allows revert of backoff
		 * (see draft-zimmermann-tcp-lcd) */
		if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
			break;
		if (seq != tp->snd_una || !icsk->icsk_retransmits ||
		    !icsk->icsk_backoff || fastopen)
			break;

		if (sock_owned_by_user(sk))
			break;

		icsk->icsk_backoff--;
		icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
					       TCP_TIMEOUT_INIT;
		icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);

		skb = tcp_rtx_queue_head(sk);
		BUG_ON(!skb);

		tcp_mstamp_refresh(tp);
		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
		remaining = icsk->icsk_rto -
			    usecs_to_jiffies(delta_us);

		if (remaining > 0) {
			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
						  remaining, TCP_RTO_MAX);
		} else {
			/* RTO revert clocked out retransmission.
			 * Will retransmit now */
			tcp_retransmit_timer(sk);
		}

		break;
	case ICMP_TIME_EXCEEDED:
		err = EHOSTUNREACH;
		break;
	default:
		goto out;
	}

	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;

			sk->sk_error_report(sk);

			tcp_done(sk);
		} else {
			sk->sk_err_soft = err;
		}
		goto out;
	}

	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note, that in the modern internet, where routing is unreliable
	 * and in each dark corner broken firewalls sit sending random
	 * errors ordered by their masters, even these two messages finally
	 * lose their original sense (even Linux sends invalid PORT_UNREACHs)
	 *
	 * Now we are in compliance with RFCs.
	 * --ANK (980905)
	 */

	inet = inet_sk(sk);
	if (!sock_owned_by_user(sk) && inet->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else {	/* Only an error on timeout */
		sk->sk_err_soft = err;
	}

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
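
/* Illustrative note: icmp_err_convert[] (defined in net/ipv4/icmp.c) is the
 * table consulted above for ICMP_DEST_UNREACH. For example, an incoming
 * ICMP_PORT_UNREACH maps to ECONNREFUSED and is fatal, which is why an
 * active connect() to a closed port fails quickly, while ICMP_NET_UNREACH
 * maps to ENETUNREACH and stays a soft error here unless the application
 * enabled IP_RECVERR.
 */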

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
	skb->csum_start = skb_transport_header(skb) - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
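
/* Illustrative note: __tcp_v4_send_check() sets up CHECKSUM_PARTIAL offload
 * rather than computing a full checksum. th->check is seeded with the
 * complemented pseudo-header sum (addresses, protocol, length), and
 * csum_start/csum_offset tell the NIC, or skb_checksum_help() as the
 * software fallback, where to begin the one's-complement sum over the TCP
 * header plus payload and where to store the result.
 */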

/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 * for reset.
 * Answer: if a packet caused RST, it is not for a socket
 * existing in our system, if it is matched to a socket,
 * it is just duplicate segment or bug in other side's TCP.
 * So we build the reply based only on parameters
 * that arrived with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */

static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
		__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
	} rep;
	struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key = NULL;
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	struct net *net;

	/* Never send a reset in response to a reset. */
	if (th->rst)
		return;

	/* If sk is not NULL, it means we did a successful lookup and the
	 * incoming route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
		return;

	/* Swap the send and the receive. */
	memset(&rep, 0, sizeof(rep));
	rep.th.dest   = th->source;
	rep.th.source = th->dest;
	rep.th.doff   = sizeof(struct tcphdr) / 4;
	rep.th.rst    = 1;

	if (th->ack) {
		rep.th.seq = th->ack_seq;
	} else {
		rep.th.ack = 1;
		rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
				       skb->len - (th->doff << 2));
	}

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
	} else if (hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We are not losing security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the md5 hash doesn't match.
		 */
		sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
					     ip_hdr(skb)->saddr,
					     th->source, ip_hdr(skb)->daddr,
					     ntohs(th->source), inet_iif(skb),
					     tcp_v4_sdif(skb));
		/* don't send rst if it can't find key */
		if (!sk1)
			goto out;

		key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
					&ip_hdr(skb)->saddr, AF_INET);
		if (!key)
			goto out;


		genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;

	}

	if (key) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) |
				   (TCPOPT_NOP << 16) |
				   (TCPOPT_MD5SIG << 8) |
				   TCPOLEN_MD5SIG);
		/* Update length and the length the header thinks exists */
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len / 4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;

	/* When the socket is gone, all binding information is lost;
	 * routing might fail in this case. No choice here: if we choose to
	 * force the input interface, we will misroute in case of an
	 * asymmetric route.
	 */
	if (sk) {
		arg.bound_dev_if = sk->sk_bound_dev_if;
		if (sk_fullsock(sk))
			trace_tcp_send_reset(sk, skb);
	}

	BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
		     offsetof(struct inet_timewait_sock, tw_bound_dev_if));

	arg.tos = ip_hdr(skb)->tos;
	arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
	local_bh_enable();

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}

/* The code below, sending ACKs in SYN-RECV and TIME-WAIT states outside
   socket context, is ugly, certainly. What can I do?
 */

static void tcp_v4_send_ack(const struct sock *sk,
			    struct sk_buff *skb, u32 seq, u32 ack,
			    u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key,
			    int reply_flags, u8 tos)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct {
		struct tcphdr th;
		__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
			   + (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
			];
	} rep;
	struct net *net = sock_net(sk);
	struct ip_reply_arg arg;

	memset(&rep.th, 0, sizeof(struct tcphdr));
	memset(&arg, 0, sizeof(arg));

	arg.iov[0].iov_base = (unsigned char *)&rep;
	arg.iov[0].iov_len  = sizeof(rep.th);
	if (tsecr) {
		rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				   (TCPOPT_TIMESTAMP << 8) |
				   TCPOLEN_TIMESTAMP);
		rep.opt[1] = htonl(tsval);
		rep.opt[2] = htonl(tsecr);
		arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
	}

	/* Swap the send and the receive. */
	rep.th.dest    = th->source;
	rep.th.source  = th->dest;
	rep.th.doff    = arg.iov[0].iov_len / 4;
	rep.th.seq     = htonl(seq);
	rep.th.ack_seq = htonl(ack);
	rep.th.ack     = 1;
	rep.th.window  = htons(win);

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		int offset = (tsecr) ? 3 : 0;

		rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_MD5SIG << 8) |
					  TCPOLEN_MD5SIG);
		arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
		rep.th.doff = arg.iov[0].iov_len/4;

		tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
				    key, ip_hdr(skb)->saddr,
				    ip_hdr(skb)->daddr, &rep.th);
	}
#endif
	arg.flags = reply_flags;
	arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
				      ip_hdr(skb)->saddr, /* XXX */
				      arg.iov[0].iov_len, IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	if (oif)
		arg.bound_dev_if = oif;
	arg.tos = tos;
	arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
	local_bh_disable();
	ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
			      skb, &TCP_SKB_CB(skb)->header.h4.opt,
			      ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
			      &arg, arg.iov[0].iov_len);

	__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
	local_bh_enable();
}

static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v4_send_ack(sk, skb,
			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent,
			tw->tw_bound_dev_if,
			tcp_twsk_md5_key(tcptw),
			tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
			tw->tw_tos
			);

	inet_twsk_put(tw);
}

static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
						 tcp_sk(sk)->snd_nxt;

	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v4_send_ack(sk, skb, seq,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent,
			0,
			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->saddr,
					  AF_INET),
			inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
			ip_hdr(skb)->tos);
}
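
/* Illustrative example of the RFC 7323 shift above: with a receive window
 * of 262144 bytes and Rcv.Wind.Shift = 7, the 16-bit window field carries
 * 262144 >> 7 = 2048, and the peer reconstructs 2048 << 7 = 262144.
 * Without the shift the value would not fit in the 16-bit field
 * (maximum 65535).
 */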

/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct flowi4 fl4;
	int err = -1;
	struct sk_buff *skb;

	/* First, grab a route. */
	if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
		return -1;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);

		err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
					    ireq->ir_rmt_addr,
					    ireq_opt_deref(ireq));
		err = net_xmit_eval(err);
	}

	return err;
}

/*
 *	IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(rcu_dereference_protected(inet_rsk(req)->ireq_opt, 1));
}

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	const struct tcp_md5sig_info *md5sig;
	__be32 mask;
	struct tcp_md5sig_key *best_match = NULL;
	bool match;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;

	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;

		if (family == AF_INET) {
			mask = inet_make_mask(key->prefixlen);
			match = (key->addr.a4.s_addr & mask) ==
				(addr->a4.s_addr & mask);
#if IS_ENABLED(CONFIG_IPV6)
		} else if (family == AF_INET6) {
			match = ipv6_prefix_equal(&key->addr.a6, &addr->a6,
						  key->prefixlen);
#endif
		} else {
			match = false;
		}

		if (match && (!best_match ||
			      key->prefixlen > best_match->prefixlen))
			best_match = key;
	}
	return best_match;
}
EXPORT_SYMBOL(tcp_md5_do_lookup);
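
/* Illustrative example of the longest-prefix match above: with two
 * configured keys, one for 10.0.0.0/8 and one for 10.1.2.0/24, a lookup
 * for peer 10.1.2.3 matches both, and the /24 key wins because its
 * prefixlen (24) is larger. A /32 key for the exact host address would
 * win over either.
 */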

static struct tcp_md5sig_key *tcp_md5_do_lookup_exact(const struct sock *sk,
						      const union tcp_md5_addr *addr,
						      int family, u8 prefixlen)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	unsigned int size = sizeof(struct in_addr);
	const struct tcp_md5sig_info *md5sig;

	/* caller either holds rcu_read_lock() or socket lock */
	md5sig = rcu_dereference_check(tp->md5sig_info,
				       lockdep_sock_is_held(sk));
	if (!md5sig)
		return NULL;
#if IS_ENABLED(CONFIG_IPV6)
	if (family == AF_INET6)
		size = sizeof(struct in6_addr);
#endif
	hlist_for_each_entry_rcu(key, &md5sig->head, node) {
		if (key->family != family)
			continue;
		if (!memcmp(&key->addr, addr, size) &&
		    key->prefixlen == prefixlen)
			return key;
	}
	return NULL;
}

struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}
EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
		   gfp_t gfp)
{
	/* Add Key to the list */
	struct tcp_md5sig_key *key;
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_info *md5sig;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (key) {
		/* Pre-existing entry - just update that one. */
		memcpy(key->key, newkey, newkeylen);
		key->keylen = newkeylen;
		return 0;
	}

	md5sig = rcu_dereference_protected(tp->md5sig_info,
					   lockdep_sock_is_held(sk));
	if (!md5sig) {
		md5sig = kmalloc(sizeof(*md5sig), gfp);
		if (!md5sig)
			return -ENOMEM;

		sk_nocaps_add(sk, NETIF_F_GSO_MASK);
		INIT_HLIST_HEAD(&md5sig->head);
		rcu_assign_pointer(tp->md5sig_info, md5sig);
	}

	key = sock_kmalloc(sk, sizeof(*key), gfp);
	if (!key)
		return -ENOMEM;
	if (!tcp_alloc_md5sig_pool()) {
		sock_kfree_s(sk, key, sizeof(*key));
		return -ENOMEM;
	}

	memcpy(key->key, newkey, newkeylen);
	key->keylen = newkeylen;
	key->family = family;
	key->prefixlen = prefixlen;
	memcpy(&key->addr, addr,
	       (family == AF_INET6) ? sizeof(struct in6_addr) :
				      sizeof(struct in_addr));
	hlist_add_head_rcu(&key->node, &md5sig->head);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_add);

int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family,
		   u8 prefixlen)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup_exact(sk, addr, family, prefixlen);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}
EXPORT_SYMBOL(tcp_md5_do_del);

static void tcp_clear_md5_list(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;
	struct hlist_node *n;
	struct tcp_md5sig_info *md5sig;

	md5sig = rcu_dereference_protected(tp->md5sig_info, 1);

	hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
		hlist_del_rcu(&key->node);
		atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
		kfree_rcu(key, rcu);
	}
}

static int tcp_v4_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
	u8 prefixlen = 32;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin->sin_family != AF_INET)
		return -EINVAL;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 32)
			return -EINVAL;
	}

	if (!cmd.tcpm_keylen)
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
				      AF_INET, prefixlen);

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
			      AF_INET, prefixlen, cmd.tcpm_key, cmd.tcpm_keylen,
			      GFP_KERNEL);
}
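
/* Illustrative usage (userspace, not kernel code): the parser above services
 * TCP_MD5SIG / TCP_MD5SIG_EXT setsockopt() calls, roughly:
 *
 *	struct tcp_md5sig md5 = { 0 };
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	md5.tcpm_keylen = 6;
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * A zero tcpm_keylen deletes the key for that address instead.
 */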
1111
Eric Dumazet19689e32016-06-27 18:51:53 +02001112static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1113 __be32 daddr, __be32 saddr,
1114 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001115{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001116 struct tcp4_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -07001117 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +02001118 struct tcphdr *_th;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001119
Eric Dumazet19689e32016-06-27 18:51:53 +02001120 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001121 bp->saddr = saddr;
1122 bp->daddr = daddr;
1123 bp->pad = 0;
YOSHIFUJI Hideaki076fb722008-04-17 12:48:12 +09001124 bp->protocol = IPPROTO_TCP;
Adam Langley49a72df2008-07-19 00:01:42 -07001125 bp->len = cpu_to_be16(nbytes);
David S. Millerc7da57a2007-10-26 00:41:21 -07001126
Eric Dumazet19689e32016-06-27 18:51:53 +02001127 _th = (struct tcphdr *)(bp + 1);
1128 memcpy(_th, th, sizeof(*th));
1129 _th->check = 0;
1130
1131 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1132 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1133 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +08001134 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -07001135}
1136
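/* For reference (added commentary): the buffer hashed above follows the
 * RFC 2385 layout - a 12-byte IPv4 pseudo-header (saddr, daddr, zero pad,
 * protocol 6, TCP length) immediately followed by a copy of the base TCP
 * header with its checksum field forced to zero. The callers below then
 * feed the payload (when hashing a full skb) and finally the key into the
 * same ahash request, so the digest covers pseudo-header, header, data and
 * key in that order.
 */
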
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001137static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001138 __be32 daddr, __be32 saddr, const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -07001139{
1140 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001141 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -07001142
1143 hp = tcp_get_md5sig_pool();
1144 if (!hp)
1145 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001146 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001147
Herbert Xucf80e0e2016-01-24 21:20:23 +08001148 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001149 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +02001150 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -07001151 goto clear_hash;
1152 if (tcp_md5_hash_key(hp, key))
1153 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001154 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1155 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001156 goto clear_hash;
1157
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001158 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001159 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -07001160
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001161clear_hash:
1162 tcp_put_md5sig_pool();
1163clear_hash_noput:
1164 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -07001165 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001166}
1167
Eric Dumazet39f8e582015-03-24 15:58:55 -07001168int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1169 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001170 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001171{
Adam Langley49a72df2008-07-19 00:01:42 -07001172 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001173 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -04001174 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001175 __be32 saddr, daddr;
1176
Eric Dumazet39f8e582015-03-24 15:58:55 -07001177 if (sk) { /* valid for established/request sockets */
1178 saddr = sk->sk_rcv_saddr;
1179 daddr = sk->sk_daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001180 } else {
Adam Langley49a72df2008-07-19 00:01:42 -07001181 const struct iphdr *iph = ip_hdr(skb);
1182 saddr = iph->saddr;
1183 daddr = iph->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001184 }
Adam Langley49a72df2008-07-19 00:01:42 -07001185
1186 hp = tcp_get_md5sig_pool();
1187 if (!hp)
1188 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001189 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -07001190
Herbert Xucf80e0e2016-01-24 21:20:23 +08001191 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001192 goto clear_hash;
1193
Eric Dumazet19689e32016-06-27 18:51:53 +02001194 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -07001195 goto clear_hash;
1196 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1197 goto clear_hash;
1198 if (tcp_md5_hash_key(hp, key))
1199 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +08001200 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1201 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -07001202 goto clear_hash;
1203
1204 tcp_put_md5sig_pool();
1205 return 0;
1206
1207clear_hash:
1208 tcp_put_md5sig_pool();
1209clear_hash_noput:
1210 memset(md5_hash, 0, 16);
1211 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001212}
Adam Langley49a72df2008-07-19 00:01:42 -07001213EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001214
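/* Both hashing helpers above share one contract: return 0 on success with
 * the 16-byte digest written to md5_hash, nonzero on failure with md5_hash
 * zeroed, so a caller comparing against a received option can never match
 * leftover garbage.
 */
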
Eric Dumazetba8e2752015-10-02 11:43:28 -07001215#endif
1216
Eric Dumazetff74e232015-03-24 15:58:54 -07001217/* Called with rcu_read_lock() */
Eric Dumazetba8e2752015-10-02 11:43:28 -07001218static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
Eric Dumazetff74e232015-03-24 15:58:54 -07001219 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001220{
Eric Dumazetba8e2752015-10-02 11:43:28 -07001221#ifdef CONFIG_TCP_MD5SIG
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001222 /*
1223 * This gets called for each TCP segment that arrives
1224 * so we want to be efficient.
1225 * We have 3 drop cases:
1226 * o No MD5 hash and one expected.
1227 * o MD5 hash and we're not expecting one.
 1228	 * o MD5 hash and it's wrong.
1229 */
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001230 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001231 struct tcp_md5sig_key *hash_expected;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001232 const struct iphdr *iph = ip_hdr(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001233 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001234 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001235 unsigned char newhash[16];
1236
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001237 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1238 AF_INET);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +09001239 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001240
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001241 /* We've parsed the options - do we have a hash? */
1242 if (!hash_expected && !hash_location)
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001243 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001244
1245 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001246 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001247 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001248 }
1249
1250 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -07001251 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001252 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001253 }
1254
1255 /* Okay, so this is hash_expected and hash_location -
 1256	 * so we need to calculate the MD5 hash and compare it.
1257 */
Adam Langley49a72df2008-07-19 00:01:42 -07001258 genhash = tcp_v4_md5_hash_skb(newhash,
1259 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -07001260 NULL, skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001261
1262 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -07001263 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
Joe Perchese87cc472012-05-13 21:56:26 +00001264 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1265 &iph->saddr, ntohs(th->source),
1266 &iph->daddr, ntohs(th->dest),
1267 genhash ? " tcp_v4_calc_md5_hash failed"
1268 : "");
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001269 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001270 }
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001271 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001272#endif
Eric Dumazetba8e2752015-10-02 11:43:28 -07001273 return false;
1274}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001275
Eric Dumazetb40cf182015-09-25 07:39:08 -07001276static void tcp_v4_init_req(struct request_sock *req,
1277 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +03001278 struct sk_buff *skb)
1279{
1280 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001281 struct net *net = sock_net(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +03001282
Eric Dumazet08d2cc3b2015-03-18 14:05:38 -07001283 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1284 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001285 RCU_INIT_POINTER(ireq->ireq_opt, tcp_v4_save_options(net, skb));
Octavian Purdila16bea702014-06-25 17:09:53 +03001286}
1287
Eric Dumazetf9646292015-09-29 07:42:50 -07001288static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1289 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001290 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +03001291{
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -04001292 return inet_csk_route_req(sk, &fl->u.ip4, req);
Octavian Purdilad94e0412014-06-25 17:09:55 +03001293}
1294
Eric Dumazet72a3eff2006-11-16 02:30:37 -08001295struct request_sock_ops tcp_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 .family = PF_INET,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001297 .obj_size = sizeof(struct tcp_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +03001298 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001299 .send_ack = tcp_v4_reqsk_send_ack,
1300 .destructor = tcp_v4_reqsk_destructor,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 .send_reset = tcp_v4_send_reset,
stephen hemminger688d1942014-08-29 23:32:05 -07001302 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303};
1304
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001305static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +03001306 .mss_clamp = TCP_MSS_DEFAULT,
Octavian Purdila16bea702014-06-25 17:09:53 +03001307#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -07001308 .req_md5_lookup = tcp_v4_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +00001309 .calc_md5_hash = tcp_v4_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -08001310#endif
Octavian Purdila16bea702014-06-25 17:09:53 +03001311 .init_req = tcp_v4_init_req,
Octavian Purdilafb7b37a2014-06-25 17:09:54 +03001312#ifdef CONFIG_SYN_COOKIES
1313 .cookie_init_seq = cookie_v4_init_sequence,
1314#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +03001315 .route_req = tcp_v4_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -07001316 .init_seq = tcp_v4_init_seq,
1317 .init_ts_off = tcp_v4_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +03001318 .send_synack = tcp_v4_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +03001319};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001320
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1322{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323	/* Never answer SYNs sent to broadcast or multicast */
Eric Dumazet511c3f92009-06-02 05:14:27 +00001324 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 goto drop;
1326
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001327 return tcp_conn_request(&tcp_request_sock_ops,
1328 &tcp_request_sock_ipv4_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001331 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 return 0;
1333}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001334EXPORT_SYMBOL(tcp_v4_conn_request);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335
1336
1337/*
 1338 * The three-way handshake has completed - we received a valid final ACK -
1339 * now create the new socket.
1340 */
Eric Dumazet0c271712015-09-29 07:42:48 -07001341struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07001342 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001343 struct dst_entry *dst,
1344 struct request_sock *req_unhash,
1345 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346{
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001347 struct inet_request_sock *ireq;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001348 struct inet_sock *newinet;
1349 struct tcp_sock *newtp;
1350 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001351#ifdef CONFIG_TCP_MD5SIG
1352 struct tcp_md5sig_key *key;
1353#endif
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001354 struct ip_options_rcu *inet_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001355
1356 if (sk_acceptq_is_full(sk))
1357 goto exit_overflow;
1358
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359 newsk = tcp_create_openreq_child(sk, req, skb);
1360 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001361 goto exit_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362
Herbert Xubcd76112006-06-30 13:36:35 -07001363 newsk->sk_gso_type = SKB_GSO_TCPV4;
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001364 inet_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365
1366 newtp = tcp_sk(newsk);
1367 newinet = inet_sk(newsk);
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -07001368 ireq = inet_rsk(req);
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001369 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1370 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
David Ahern6dd9a142015-12-16 13:20:44 -08001371 newsk->sk_bound_dev_if = ireq->ir_iif;
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001372 newinet->inet_saddr = ireq->ir_loc_addr;
1373 inet_opt = rcu_dereference(ireq->ireq_opt);
1374 RCU_INIT_POINTER(newinet->inet_opt, inet_opt);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001375 newinet->mc_index = inet_iif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001376 newinet->mc_ttl = ip_hdr(skb)->ttl;
Jiri Benc4c507d22012-02-09 09:35:49 +00001377 newinet->rcv_tos = ip_hdr(skb)->tos;
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001378 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001379 if (inet_opt)
1380 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001381 newinet->inet_id = newtp->write_seq ^ jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382
Eric Dumazetdfd25ff2012-03-10 09:20:21 +00001383 if (!dst) {
1384 dst = inet_csk_route_child_sock(sk, newsk, req);
1385 if (!dst)
1386 goto put_and_exit;
1387 } else {
1388 /* syncookie case : see end of cookie_v4_check() */
1389 }
David S. Miller0e734412011-05-08 15:28:03 -07001390 sk_setup_caps(newsk, dst);
1391
Daniel Borkmann81164412015-01-05 23:57:48 +01001392 tcp_ca_openreq_child(newsk, dst);
1393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001395 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Tom Quetchenbachf5fff5d2008-09-21 00:21:51 -07001396
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397 tcp_initialize_rcv_mss(newsk);
1398
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001399#ifdef CONFIG_TCP_MD5SIG
1400 /* Copy over the MD5 key from the original socket */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001401 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1402 AF_INET);
Ian Morris00db4122015-04-03 09:17:27 +01001403 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001404 /*
1405 * We're using one, so create a matching key
1406 * on the newsk structure. If we fail to get
1407 * memory, then we end up not copying the key
1408 * across. Shucks.
1409 */
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001410 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
Ivan Delalande67973182017-06-15 18:07:06 -07001411 AF_INET, 32, key->key, key->keylen, GFP_ATOMIC);
Eric Dumazeta4654192010-05-16 00:36:33 -07001412 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001413 }
1414#endif
1415
David S. Miller0e734412011-05-08 15:28:03 -07001416 if (__inet_inherit_port(sk, newsk) < 0)
1417 goto put_and_exit;
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001418 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001419 if (likely(*own_req)) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001420 tcp_move_syn(newtp, req);
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001421 ireq->ireq_opt = NULL;
1422 } else {
1423 newinet->inet_opt = NULL;
1424 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 return newsk;
1426
1427exit_overflow:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001428 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001429exit_nonewsk:
1430 dst_release(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431exit:
Eric Dumazet9caad862016-04-01 08:52:20 -07001432 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 return NULL;
David S. Miller0e734412011-05-08 15:28:03 -07001434put_and_exit:
Eric Dumazetc92e8c02017-10-20 09:04:13 -07001435 newinet->inet_opt = NULL;
Christoph Paasche337e242012-12-14 04:07:58 +00001436 inet_csk_prepare_forced_close(newsk);
1437 tcp_done(newsk);
David S. Miller0e734412011-05-08 15:28:03 -07001438 goto exit;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001440EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
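/* Added note on the *own_req handling in tcp_v4_syn_recv_sock() above:
 * when inet_ehash_nolisten() reports that this caller owns the request,
 * ownership of ireq->ireq_opt has effectively moved to newinet->inet_opt,
 * so ireq_opt is cleared to keep the request sock destructor from freeing
 * the IP options a second time. In the losing case the child must not
 * free them, hence newinet->inet_opt = NULL instead.
 */
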
Eric Dumazet079096f2015-10-02 11:43:32 -07001442static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001445 const struct tcphdr *th = tcp_hdr(skb);
1446
Florian Westphalaf9b4732010-06-03 00:43:44 +00001447 if (!th->syn)
Cong Wang461b74c2014-10-15 14:33:22 -07001448 sk = cookie_v4_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449#endif
1450 return sk;
1451}
1452
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453/* The socket must have its spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001454 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 *
1456 * We have a potential double-lock case here, so even when
1457 * doing backlog processing we use the BH locking scheme.
1458 * This is because we cannot sleep with the original spinlock
1459 * held.
1460 */
1461int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1462{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001463 struct sock *rsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001464
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet404e0a82012-07-29 23:20:37 +00001466 struct dst_entry *dst = sk->sk_rx_dst;
1467
Tom Herbertbdeab992011-08-14 19:45:55 +00001468 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001469 sk_mark_napi_id(sk, skb);
Eric Dumazet404e0a82012-07-29 23:20:37 +00001470 if (dst) {
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001471 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
Ian Morris51456b22015-04-03 09:17:26 +01001472 !dst->ops->check(dst, 0)) {
David S. Miller92101b32012-07-23 16:29:00 -07001473 dst_release(dst);
1474 sk->sk_rx_dst = NULL;
1475 }
1476 }
Matvejchikov Ilyae42e24c2017-07-24 16:02:12 +04001477 tcp_rcv_established(sk, skb, tcp_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 return 0;
1479 }
1480
Eric Dumazet12e25e12015-06-03 23:49:21 -07001481 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 goto csum_err;
1483
1484 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001485 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 if (!nsk)
1488 goto discard;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001489 if (nsk != sk) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001490 if (tcp_child_process(sk, nsk, skb)) {
1491 rsk = nsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001493 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 return 0;
1495 }
Eric Dumazetca551582010-06-03 09:03:58 +00001496 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001497 sock_rps_save_rxhash(sk, skb);
Eric Dumazetca551582010-06-03 09:03:58 +00001498
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001499 if (tcp_rcv_state_process(sk, skb)) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001500 rsk = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 goto reset;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001502 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 return 0;
1504
1505reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001506 tcp_v4_send_reset(rsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507discard:
1508 kfree_skb(skb);
1509 /* Be careful here. If this function gets more complicated and
1510 * gcc suffers from register pressure on the x86, sk (in %ebx)
1511 * might be destroyed here. This current version compiles correctly,
1512 * but you have been warned.
1513 */
1514 return 0;
1515
1516csum_err:
Eric Dumazetc10d9312016-04-29 14:16:47 -07001517 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1518 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519 goto discard;
1520}
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001521EXPORT_SYMBOL(tcp_v4_do_rcv);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001522
Paolo Abeni74874492017-09-28 15:51:36 +02001523int tcp_v4_early_demux(struct sk_buff *skb)
David S. Miller41063e92012-06-19 21:22:05 -07001524{
David S. Miller41063e92012-06-19 21:22:05 -07001525 const struct iphdr *iph;
1526 const struct tcphdr *th;
1527 struct sock *sk;
David S. Miller41063e92012-06-19 21:22:05 -07001528
David S. Miller41063e92012-06-19 21:22:05 -07001529 if (skb->pkt_type != PACKET_HOST)
Paolo Abeni74874492017-09-28 15:51:36 +02001530 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001531
Eric Dumazet45f00f92012-10-22 21:42:47 +00001532 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
Paolo Abeni74874492017-09-28 15:51:36 +02001533 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001534
1535 iph = ip_hdr(skb);
Eric Dumazet45f00f92012-10-22 21:42:47 +00001536 th = tcp_hdr(skb);
David S. Miller41063e92012-06-19 21:22:05 -07001537
1538 if (th->doff < sizeof(struct tcphdr) / 4)
Paolo Abeni74874492017-09-28 15:51:36 +02001539 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001540
Eric Dumazet45f00f92012-10-22 21:42:47 +00001541 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
David S. Miller41063e92012-06-19 21:22:05 -07001542 iph->saddr, th->source,
Vijay Subramanian7011d082012-06-23 17:38:10 +00001543 iph->daddr, ntohs(th->dest),
David Ahern3fa6f612017-08-07 08:44:17 -07001544 skb->skb_iif, inet_sdif(skb));
David S. Miller41063e92012-06-19 21:22:05 -07001545 if (sk) {
1546 skb->sk = sk;
1547 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001548 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001549 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001550
David S. Miller41063e92012-06-19 21:22:05 -07001551 if (dst)
1552 dst = dst_check(dst, 0);
David S. Miller92101b32012-07-23 16:29:00 -07001553 if (dst &&
Eric Dumazet505fbcf2012-07-27 06:23:40 +00001554 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
David S. Miller92101b32012-07-23 16:29:00 -07001555 skb_dst_set_noref(skb, dst);
David S. Miller41063e92012-06-19 21:22:05 -07001556 }
1557 }
Paolo Abeni74874492017-09-28 15:51:36 +02001558 return 0;
David S. Miller41063e92012-06-19 21:22:05 -07001559}
1560
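/* Added commentary: tcp_v4_early_demux() above is an optimization run
 * early in IPv4 receive, before the routing decision. It consults only the
 * established hash (never the listener table), and on a hit attaches the
 * socket and, when the cached rx_dst still matches the incoming interface,
 * a ready-made dst so the packet can skip a full route lookup on its way
 * to tcp_v4_rcv().
 */
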
Eric Dumazetc9c33212016-08-27 07:37:54 -07001561bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1562{
1563 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1564
 1565	/* Only the socket owner can try to collapse/prune rx queues
 1566	 * to reduce memory overhead, so add a little headroom here.
 1567	 * Only a few socket backlogs are likely to be non-empty concurrently.
1568 */
1569 limit += 64*1024;
1570
1571 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1572 * we can fix skb->truesize to its real value to avoid future drops.
1573 * This is valid because skb is not yet charged to the socket.
1574 * It has been noticed pure SACK packets were sometimes dropped
1575 * (if cooked by drivers without copybreak feature).
1576 */
Eric Dumazet60b1af32017-01-24 14:57:36 -08001577 skb_condense(skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001578
1579 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1580 bh_unlock_sock(sk);
1581 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1582 return true;
1583 }
1584 return false;
1585}
1586EXPORT_SYMBOL(tcp_add_backlog);
1587
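/* Rough worked example for the limit above (illustrative, assuming common
 * defaults of tcp_rmem[1] = 87380 and tcp_wmem[1] = 16384): an otherwise
 * unconfigured socket gets 87380 + 16384 + 65536 = 169300 bytes of backlog
 * headroom. skb_condense() may shrink skb->truesize first, so small pure
 * ACK/SACK packets are less likely to push the backlog over that limit.
 */
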
Eric Dumazetac6e7802016-11-10 13:12:35 -08001588int tcp_filter(struct sock *sk, struct sk_buff *skb)
1589{
1590 struct tcphdr *th = (struct tcphdr *)skb->data;
1591 unsigned int eaten = skb->len;
1592 int err;
1593
1594 err = sk_filter_trim_cap(sk, skb, th->doff * 4);
1595 if (!err) {
1596 eaten -= skb->len;
1597 TCP_SKB_CB(skb)->end_seq -= eaten;
1598 }
1599 return err;
1600}
1601EXPORT_SYMBOL(tcp_filter);
1602
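/* Added note: sk_filter_trim_cap() runs the socket's BPF filter but caps
 * any trimming at th->doff * 4, so a filter may drop payload yet never cut
 * into the TCP header itself. "eaten" measures how many bytes were trimmed
 * and end_seq is pulled back by the same amount, keeping sequence
 * bookkeeping consistent with the shortened skb.
 */
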
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001603static void tcp_v4_restore_cb(struct sk_buff *skb)
1604{
1605 memmove(IPCB(skb), &TCP_SKB_CB(skb)->header.h4,
1606 sizeof(struct inet_skb_parm));
1607}
1608
1609static void tcp_v4_fill_cb(struct sk_buff *skb, const struct iphdr *iph,
1610 const struct tcphdr *th)
1611{
1612 /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
 1613	 * barrier() makes sure the compiler won't play fool^Waliasing games.
1614 */
1615 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1616 sizeof(struct inet_skb_parm));
1617 barrier();
1618
1619 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1620 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1621 skb->len - th->doff * 4);
1622 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1623 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1624 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1625 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1626 TCP_SKB_CB(skb)->sacked = 0;
1627 TCP_SKB_CB(skb)->has_rxtstamp =
1628 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
1629}
1630
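/* Worked example for the end_seq computation above (made-up numbers): a
 * segment with seq = 1000, a 100-byte payload and FIN set but no SYN gives
 * end_seq = 1000 + 0 + 1 + 100 = 1101, since SYN and FIN each consume one
 * sequence number in addition to the payload bytes.
 */
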
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631/*
1632 * From tcp_input.c
1633 */
1634
1635int tcp_v4_rcv(struct sk_buff *skb)
1636{
Eric Dumazet3b24d852016-04-01 08:52:17 -07001637 struct net *net = dev_net(skb->dev);
David Ahern3fa6f612017-08-07 08:44:17 -07001638 int sdif = inet_sdif(skb);
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001639 const struct iphdr *iph;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001640 const struct tcphdr *th;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001641 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001642 struct sock *sk;
1643 int ret;
1644
1645 if (skb->pkt_type != PACKET_HOST)
1646 goto discard_it;
1647
1648 /* Count it even if it's bad */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001649 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
1651 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1652 goto discard_it;
1653
Eric Dumazetea1627c2016-05-13 09:16:40 -07001654 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
Eric Dumazetea1627c2016-05-13 09:16:40 -07001656 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657 goto bad_packet;
1658 if (!pskb_may_pull(skb, th->doff * 4))
1659 goto discard_it;
1660
1661 /* An explanation is required here, I think.
1662 * Packet length and doff are validated by header prediction,
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001663	 * provided the case of th->doff==0 is eliminated.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 * So, we defer the checks. */
Tom Herberted70fcf2014-05-02 16:29:38 -07001665
1666 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001667 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668
Eric Dumazetea1627c2016-05-13 09:16:40 -07001669 th = (const struct tcphdr *)skb->data;
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001670 iph = ip_hdr(skb);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001671lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001672 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
David Ahern3fa6f612017-08-07 08:44:17 -07001673 th->dest, sdif, &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001674 if (!sk)
1675 goto no_tcp_socket;
1676
Eric Dumazetbb134d52010-03-09 05:55:56 +00001677process:
1678 if (sk->sk_state == TCP_TIME_WAIT)
1679 goto do_time_wait;
1680
Eric Dumazet079096f2015-10-02 11:43:32 -07001681 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1682 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001683 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001684 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001685
1686 sk = req->rsk_listener;
Eric Dumazet72923552016-02-11 22:50:29 -08001687 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001688 sk_drops_add(sk, skb);
Eric Dumazet72923552016-02-11 22:50:29 -08001689 reqsk_put(req);
1690 goto discard_it;
1691 }
Eric Dumazet77166822016-02-18 05:39:18 -08001692 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001693 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001694 goto lookup;
1695 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07001696 /* We own a reference on the listener, increase it again
1697 * as we might lose it too soon.
1698 */
Eric Dumazet77166822016-02-18 05:39:18 -08001699 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001700 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001701 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001702 if (!tcp_filter(sk, skb)) {
1703 th = (const struct tcphdr *)skb->data;
1704 iph = ip_hdr(skb);
1705 tcp_v4_fill_cb(skb, iph, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001706 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001707 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001708 if (!nsk) {
1709 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001710 if (req_stolen) {
1711 /* Another cpu got exclusive access to req
1712 * and created a full blown socket.
1713 * Try to feed this packet to this socket
1714 * instead of discarding it.
1715 */
1716 tcp_v4_restore_cb(skb);
1717 sock_put(sk);
1718 goto lookup;
1719 }
Eric Dumazet77166822016-02-18 05:39:18 -08001720 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001721 }
1722 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001723 reqsk_put(req);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001724 tcp_v4_restore_cb(skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001725 } else if (tcp_child_process(sk, nsk, skb)) {
1726 tcp_v4_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001727 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001728 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001729 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001730 return 0;
1731 }
1732 }
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001733 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001734 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001735 goto discard_and_relse;
Eric Dumazet6cce09f2010-03-07 23:21:57 +00001736 }
Stephen Hemmingerd218d112010-01-11 16:28:01 -08001737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1739 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001740
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001741 if (tcp_v4_inbound_md5_hash(sk, skb))
1742 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001743
Patrick McHardyb59c2702006-01-06 23:06:10 -08001744 nf_reset(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Eric Dumazetac6e7802016-11-10 13:12:35 -08001746 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001748 th = (const struct tcphdr *)skb->data;
1749 iph = ip_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001750 tcp_v4_fill_cb(skb, iph, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751
1752 skb->dev = NULL;
1753
Eric Dumazete994b2f2015-10-02 11:43:39 -07001754 if (sk->sk_state == TCP_LISTEN) {
1755 ret = tcp_v4_do_rcv(sk, skb);
1756 goto put_and_return;
1757 }
1758
1759 sk_incoming_cpu_update(sk);
1760
Ingo Molnarc6366182006-07-03 00:25:13 -07001761 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001762 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001763 ret = 0;
1764 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02001765 ret = tcp_v4_do_rcv(sk, skb);
Eric Dumazetc9c33212016-08-27 07:37:54 -07001766 } else if (tcp_add_backlog(sk, skb)) {
Zhu Yi6b03a532010-03-04 18:01:41 +00001767 goto discard_and_relse;
1768 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 bh_unlock_sock(sk);
1770
Eric Dumazete994b2f2015-10-02 11:43:39 -07001771put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001772 if (refcounted)
1773 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774
1775 return ret;
1776
1777no_tcp_socket:
1778 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1779 goto discard_it;
1780
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001781 tcp_v4_fill_cb(skb, iph, th);
1782
Eric Dumazet12e25e12015-06-03 23:49:21 -07001783 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001784csum_error:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001785 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001787 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001789 tcp_v4_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 }
1791
1792discard_it:
1793 /* Discard frame. */
1794 kfree_skb(skb);
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001795 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
1797discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001798 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001799 if (refcounted)
1800 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 goto discard_it;
1802
1803do_time_wait:
1804 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001805 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806 goto discard_it;
1807 }
1808
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001809 tcp_v4_fill_cb(skb, iph, th);
1810
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001811 if (tcp_checksum_complete(skb)) {
1812 inet_twsk_put(inet_twsk(sk));
1813 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001814 }
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001815 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816 case TCP_TW_SYN: {
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001817 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
Craig Galleka5836362016-02-10 11:50:38 -05001818 &tcp_hashinfo, skb,
1819 __tcp_hdrlen(th),
Tom Herbertda5e3632013-01-22 09:50:24 +00001820 iph->saddr, th->source,
Arnaldo Carvalho de Meloeddc9ec2007-04-20 22:47:35 -07001821 iph->daddr, th->dest,
David Ahern3fa6f612017-08-07 08:44:17 -07001822 inet_iif(skb),
1823 sdif);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824 if (sk2) {
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001825 inet_twsk_deschedule_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 sk = sk2;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001827 tcp_v4_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001828 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829 goto process;
1830 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831 }
Gustavo A. R. Silvafcfd6df2017-10-16 15:48:55 -05001832 /* to ACK */
1833 /* fall through */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 case TCP_TW_ACK:
1835 tcp_v4_timewait_ack(sk, skb);
1836 break;
1837 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001838 tcp_v4_send_reset(sk, skb);
1839 inet_twsk_deschedule_put(inet_twsk(sk));
1840 goto discard_it;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001841 case TCP_TW_SUCCESS:;
1842 }
1843 goto discard_it;
1844}
1845
David S. Millerccb7c412010-12-01 18:09:13 -08001846static struct timewait_sock_ops tcp_timewait_sock_ops = {
1847 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1848 .twsk_unique = tcp_twsk_unique,
1849 .twsk_destructor= tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001850};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851
Eric Dumazet63d02d12012-08-09 14:11:00 +00001852void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001853{
1854 struct dst_entry *dst = skb_dst(skb);
1855
Eric Dumazet5037e9e2015-12-14 14:08:53 -08001856 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -07001857 sk->sk_rx_dst = dst;
1858 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1859 }
Eric Dumazet5d299f32012-08-06 05:09:33 +00001860}
Eric Dumazet63d02d12012-08-09 14:11:00 +00001861EXPORT_SYMBOL(inet_sk_rx_dst_set);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001862
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001863const struct inet_connection_sock_af_ops ipv4_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001864 .queue_xmit = ip_queue_xmit,
1865 .send_check = tcp_v4_send_check,
1866 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001867 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001868 .conn_request = tcp_v4_conn_request,
1869 .syn_recv_sock = tcp_v4_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001870 .net_header_len = sizeof(struct iphdr),
1871 .setsockopt = ip_setsockopt,
1872 .getsockopt = ip_getsockopt,
1873 .addr2sockaddr = inet_csk_addr2sockaddr,
1874 .sockaddr_len = sizeof(struct sockaddr_in),
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001875#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001876 .compat_setsockopt = compat_ip_setsockopt,
1877 .compat_getsockopt = compat_ip_getsockopt,
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001878#endif
Neal Cardwell4fab9072014-08-14 12:40:05 -04001879 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880};
Eric Dumazet4bc2f182010-07-09 21:22:10 +00001881EXPORT_SYMBOL(ipv4_specific);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001883#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001884static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001885 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001886 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001887 .md5_parse = tcp_v4_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001888};
Andrew Mortonb6332e62006-11-30 19:16:28 -08001889#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001890
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891/* NOTE: A lot of things are set to zero explicitly by the call to
 1892 * sk_alloc(), so they need not be done here.
1893 */
1894static int tcp_v4_init_sock(struct sock *sk)
1895{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001896 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
Neal Cardwell900f65d2012-04-19 09:55:21 +00001898 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001900 icsk->icsk_af_ops = &ipv4_specific;
Neal Cardwell900f65d2012-04-19 09:55:21 +00001901
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001902#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001903 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001904#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001905
Linus Torvalds1da177e2005-04-16 15:20:36 -07001906 return 0;
1907}
1908
Brian Haley7d06b2e2008-06-14 17:04:49 -07001909void tcp_v4_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910{
1911 struct tcp_sock *tp = tcp_sk(sk);
1912
Song Liue1a4aa52017-10-23 09:20:26 -07001913 trace_tcp_destroy_sock(sk);
1914
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 tcp_clear_xmit_timers(sk);
1916
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001917 tcp_cleanup_congestion_control(sk);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07001918
Dave Watson734942c2017-06-14 11:37:14 -07001919 tcp_cleanup_ulp(sk);
1920
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921	/* Clean up the write buffer. */
David S. Millerfe067e82007-03-07 12:12:44 -08001922 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
Wei Wangcf1ef3f2017-04-20 14:45:46 -07001924 /* Check if we want to disable active TFO */
1925 tcp_fastopen_active_disable_ofo_check(sk);
1926
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 /* Cleans up our, hopefully empty, out_of_order_queue. */
Yaogong Wang9f5afea2016-09-07 14:49:28 -07001928 skb_rbtree_purge(&tp->out_of_order_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001929
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001930#ifdef CONFIG_TCP_MD5SIG
1931 /* Clean up the MD5 key list, if any */
1932 if (tp->md5sig_info) {
Eric Dumazeta915da9b2012-01-31 05:18:33 +00001933 tcp_clear_md5_list(sk);
Mat Martineaufb7df5e2017-12-21 10:29:10 -08001934 kfree_rcu(rcu_dereference_protected(tp->md5sig_info, 1), rcu);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001935 tp->md5sig_info = NULL;
1936 }
1937#endif
1938
Linus Torvalds1da177e2005-04-16 15:20:36 -07001939 /* Clean up a referenced TCP bind bucket. */
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001940 if (inet_csk(sk)->icsk_bind_hash)
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001941 inet_put_port(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
Ian Morris00db4122015-04-03 09:17:27 +01001943 BUG_ON(tp->fastopen_rsk);
William Allen Simpson435cf552009-12-02 18:17:05 +00001944
Yuchung Chengcf60af02012-07-19 06:43:09 +00001945 /* If socket is aborted during connect operation */
1946 tcp_free_fastopen_req(tp);
Yuchung Cheng1fba70e2017-10-18 11:22:51 -07001947 tcp_fastopen_destroy_cipher(sk);
Eric Dumazetcd8ae852015-05-03 21:34:46 -07001948 tcp_saved_syn_free(tp);
Yuchung Chengcf60af02012-07-19 06:43:09 +00001949
Glauber Costa180d8cd2011-12-11 21:47:02 +00001950 sk_sockets_allocated_dec(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952EXPORT_SYMBOL(tcp_v4_destroy_sock);
1953
1954#ifdef CONFIG_PROC_FS
1955/* Proc filesystem TCP sock list dumping. */
1956
Tom Herberta8b690f2010-06-07 00:43:42 -07001957/*
 1958 * Get the next listener socket after cur. If cur is NULL, get the first socket
 1959 * starting from the bucket given in st->bucket; when st->bucket is zero the
1960 * very first socket in the hash table is returned.
1961 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962static void *listening_get_next(struct seq_file *seq, void *cur)
1963{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001964 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08001965 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07001966 struct net *net = seq_file_net(seq);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001967 struct inet_listen_hashbucket *ilb;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001968 struct sock *sk = cur;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
1970 if (!sk) {
Eric Dumazet3b24d852016-04-01 08:52:17 -07001971get_head:
Tom Herberta8b690f2010-06-07 00:43:42 -07001972 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Eric Dumazet9652dc22016-10-19 21:24:58 -07001973 spin_lock(&ilb->lock);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001974 sk = sk_head(&ilb->head);
Tom Herberta8b690f2010-06-07 00:43:42 -07001975 st->offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976 goto get_sk;
1977 }
Eric Dumazet5caea4e2008-11-20 00:40:07 -08001978 ilb = &tcp_hashinfo.listening_hash[st->bucket];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07001980 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
Eric Dumazet3b24d852016-04-01 08:52:17 -07001982 sk = sk_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983get_sk:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001984 sk_for_each_from(sk) {
Pavel Emelyanov8475ef92010-11-22 03:26:12 +00001985 if (!net_eq(sock_net(sk), net))
1986 continue;
Christoph Hellwig37d849b2018-04-11 09:31:28 +02001987 if (sk->sk_family == afinfo->family)
Eric Dumazet3b24d852016-04-01 08:52:17 -07001988 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989 }
Eric Dumazet9652dc22016-10-19 21:24:58 -07001990 spin_unlock(&ilb->lock);
Tom Herberta8b690f2010-06-07 00:43:42 -07001991 st->offset = 0;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001992 if (++st->bucket < INET_LHTABLE_SIZE)
1993 goto get_head;
1994 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995}
1996
1997static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1998{
Tom Herberta8b690f2010-06-07 00:43:42 -07001999 struct tcp_iter_state *st = seq->private;
2000 void *rc;
2001
2002 st->bucket = 0;
2003 st->offset = 0;
2004 rc = listening_get_next(seq, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002005
2006 while (rc && *pos) {
2007 rc = listening_get_next(seq, rc);
2008 --*pos;
2009 }
2010 return rc;
2011}
2012
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002013static inline bool empty_bucket(const struct tcp_iter_state *st)
Andi Kleen6eac5602008-08-28 01:08:02 -07002014{
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002015 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
Andi Kleen6eac5602008-08-28 01:08:02 -07002016}
2017
Tom Herberta8b690f2010-06-07 00:43:42 -07002018/*
2019 * Get first established socket starting from bucket given in st->bucket.
2020 * If st->bucket is zero, the very first socket in the hash is returned.
2021 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002022static void *established_get_first(struct seq_file *seq)
2023{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002024 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Jianjun Kong5799de02008-11-03 02:49:10 -08002025 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002026 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002027 void *rc = NULL;
2028
Tom Herberta8b690f2010-06-07 00:43:42 -07002029 st->offset = 0;
2030 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031 struct sock *sk;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002032 struct hlist_nulls_node *node;
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002033 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
Andi Kleen6eac5602008-08-28 01:08:02 -07002035 /* Lockless fast path for the common case of empty buckets */
2036 if (empty_bucket(st))
2037 continue;
2038
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002039 spin_lock_bh(lock);
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002040 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002041 if (sk->sk_family != afinfo->family ||
YOSHIFUJI Hideaki878628f2008-03-26 03:57:35 +09002042 !net_eq(sock_net(sk), net)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 continue;
2044 }
2045 rc = sk;
2046 goto out;
2047 }
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002048 spin_unlock_bh(lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 }
2050out:
2051 return rc;
2052}
2053
2054static void *established_get_next(struct seq_file *seq, void *cur)
2055{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002056 struct tcp_seq_afinfo *afinfo = PDE_DATA(file_inode(seq->file));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 struct sock *sk = cur;
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002058 struct hlist_nulls_node *node;
Jianjun Kong5799de02008-11-03 02:49:10 -08002059 struct tcp_iter_state *st = seq->private;
Denis V. Luneva4146b12008-04-13 22:11:14 -07002060 struct net *net = seq_file_net(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061
2062 ++st->num;
Tom Herberta8b690f2010-06-07 00:43:42 -07002063 ++st->offset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002065 sk = sk_nulls_next(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066
Eric Dumazet3ab5aee2008-11-16 19:40:17 -08002067 sk_nulls_for_each_from(sk, node) {
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002068 if (sk->sk_family == afinfo->family &&
2069 net_eq(sock_net(sk), net))
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002070 return sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 }
2072
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002073 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2074 ++st->bucket;
2075 return established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076}
2077
2078static void *established_get_idx(struct seq_file *seq, loff_t pos)
2079{
Tom Herberta8b690f2010-06-07 00:43:42 -07002080 struct tcp_iter_state *st = seq->private;
2081 void *rc;
2082
2083 st->bucket = 0;
2084 rc = established_get_first(seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
2086 while (rc && pos) {
2087 rc = established_get_next(seq, rc);
2088 --pos;
Arnaldo Carvalho de Melo71742592006-11-17 10:57:30 -02002089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090 return rc;
2091}
2092
2093static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2094{
2095 void *rc;
Jianjun Kong5799de02008-11-03 02:49:10 -08002096 struct tcp_iter_state *st = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098 st->state = TCP_SEQ_STATE_LISTENING;
2099 rc = listening_get_idx(seq, &pos);
2100
2101 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102 st->state = TCP_SEQ_STATE_ESTABLISHED;
2103 rc = established_get_idx(seq, pos);
2104 }
2105
2106 return rc;
2107}
2108
Tom Herberta8b690f2010-06-07 00:43:42 -07002109static void *tcp_seek_last_pos(struct seq_file *seq)
2110{
2111 struct tcp_iter_state *st = seq->private;
2112 int offset = st->offset;
2113 int orig_num = st->num;
2114 void *rc = NULL;
2115
2116 switch (st->state) {
Tom Herberta8b690f2010-06-07 00:43:42 -07002117 case TCP_SEQ_STATE_LISTENING:
2118 if (st->bucket >= INET_LHTABLE_SIZE)
2119 break;
2120 st->state = TCP_SEQ_STATE_LISTENING;
2121 rc = listening_get_next(seq, NULL);
2122 while (offset-- && rc)
2123 rc = listening_get_next(seq, rc);
2124 if (rc)
2125 break;
2126 st->bucket = 0;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002127 st->state = TCP_SEQ_STATE_ESTABLISHED;
Tom Herberta8b690f2010-06-07 00:43:42 -07002128 /* Fallthrough */
2129 case TCP_SEQ_STATE_ESTABLISHED:
Tom Herberta8b690f2010-06-07 00:43:42 -07002130 if (st->bucket > tcp_hashinfo.ehash_mask)
2131 break;
2132 rc = established_get_first(seq);
2133 while (offset-- && rc)
2134 rc = established_get_next(seq, rc);
2135 }
2136
2137 st->num = orig_num;
2138
2139 return rc;
2140}
2141
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002142void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143{
Jianjun Kong5799de02008-11-03 02:49:10 -08002144 struct tcp_iter_state *st = seq->private;
Tom Herberta8b690f2010-06-07 00:43:42 -07002145 void *rc;
2146
2147 if (*pos && *pos == st->last_pos) {
2148 rc = tcp_seek_last_pos(seq);
2149 if (rc)
2150 goto out;
2151 }
2152
Linus Torvalds1da177e2005-04-16 15:20:36 -07002153 st->state = TCP_SEQ_STATE_LISTENING;
2154 st->num = 0;
Tom Herberta8b690f2010-06-07 00:43:42 -07002155 st->bucket = 0;
2156 st->offset = 0;
2157 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2158
2159out:
2160 st->last_pos = *pos;
2161 return rc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162}
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002163EXPORT_SYMBOL(tcp_seq_start);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002165void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166{
Tom Herberta8b690f2010-06-07 00:43:42 -07002167 struct tcp_iter_state *st = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 void *rc = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169
2170 if (v == SEQ_START_TOKEN) {
2171 rc = tcp_get_idx(seq, 0);
2172 goto out;
2173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174
2175 switch (st->state) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 case TCP_SEQ_STATE_LISTENING:
2177 rc = listening_get_next(seq, v);
2178 if (!rc) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 st->state = TCP_SEQ_STATE_ESTABLISHED;
Tom Herberta8b690f2010-06-07 00:43:42 -07002180 st->bucket = 0;
2181 st->offset = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 rc = established_get_first(seq);
2183 }
2184 break;
2185 case TCP_SEQ_STATE_ESTABLISHED:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 rc = established_get_next(seq, v);
2187 break;
2188 }
2189out:
2190 ++*pos;
Tom Herberta8b690f2010-06-07 00:43:42 -07002191 st->last_pos = *pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 return rc;
2193}
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002194EXPORT_SYMBOL(tcp_seq_next);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002196void tcp_seq_stop(struct seq_file *seq, void *v)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197{
Jianjun Kong5799de02008-11-03 02:49:10 -08002198 struct tcp_iter_state *st = seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
2200 switch (st->state) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 case TCP_SEQ_STATE_LISTENING:
2202 if (v != SEQ_START_TOKEN)
Eric Dumazet9652dc22016-10-19 21:24:58 -07002203 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205 case TCP_SEQ_STATE_ESTABLISHED:
2206 if (v)
Eric Dumazet9db66bd2008-11-20 20:39:09 -08002207 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 break;
2209 }
2210}
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002211EXPORT_SYMBOL(tcp_seq_stop);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
static void get_openreq4(const struct request_sock *req,
			 struct seq_file *f, int i)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->rsk_timer.expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
		i,
		ireq->ir_loc_addr,
		ireq->ir_num,
		ireq->ir_rmt_addr,
		ntohs(ireq->ir_rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f),
				 sock_i_uid(req->rsk_listener)),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		0,
		req);
}

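/*
 * Format one row for a full socket.  The "tr" column encodes the
 * pending timer: 1 retransmit/loss-probe, 2 keepalive (sk_timer),
 * 4 zero-window probe, 0 none; TIME_WAIT sockets report 3 from
 * get_timewait4_sock() below.
 */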
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;
	int state;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	state = inet_sk_state_load(sk);
	if (state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
		i, src, srcp, dest, destp, state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		refcount_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		state == TCP_LISTEN ?
		    fastopenq->max_qlen :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

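/*
 * Format one row for a TIME_WAIT (or FIN_WAIT2 substate) socket.  Most
 * columns are fixed because a timewait sock keeps only the minimal
 * state needed to expire: timer code 3, empty queues, no uid or inode.
 */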
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	__be32 dest, src;
	__u16 destp, srcp;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		refcount_read(&tw->tw_refcnt), tw);
}

#define TMPSZ 150

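/*
 * seq_file ->show(): emit the column header at the SEQ_START_TOKEN
 * position, otherwise dispatch on sk_state so that request socks,
 * timewait socks and full socks each reach the matching formatter.
 * seq_setwidth()/seq_pad() keep every row at TMPSZ - 1 characters plus
 * the newline.
 */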
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	struct sock *sk = v;

	seq_setwidth(seq, TMPSZ - 1);
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	if (sk->sk_state == TCP_TIME_WAIT)
		get_timewait4_sock(v, seq, st->num);
	else if (sk->sk_state == TCP_NEW_SYN_RECV)
		get_openreq4(v, seq, st->num);
	else
		get_tcp4_sock(v, seq, st->num);
out:
	seq_pad(seq, '\n');
	return 0;
}

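/*
 * Illustrative sketch (not part of this file): a minimal userspace
 * reader for the rows produced above.  The sscanf() pattern mirrors the
 * "%4d: %08X:%04X %08X:%04X %02X" prefix emitted by the formatters;
 * error handling is trimmed for brevity.
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/net/tcp", "r");
 *		char line[256];
 *		unsigned int sl, laddr, lport, raddr, rport, st;
 *
 *		while (f && fgets(line, sizeof(line), f)) {
 *			// the header line fails the match and is skipped
 *			if (sscanf(line, " %u: %X:%X %X:%X %X",
 *				   &sl, &laddr, &lport, &raddr,
 *				   &rport, &st) == 6)
 *				printf("slot %u state %#x\n", sl, st);
 *		}
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */
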
static const struct seq_operations tcp4_seq_ops = {
	.show		= tcp4_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};

static int tcp_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &tcp4_seq_ops,
			    sizeof(struct tcp_iter_state));
}

static const struct file_operations tcp_afinfo_seq_fops = {
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.family		= AF_INET,
};

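/*
 * Create the "tcp" entry under each network namespace's /proc/net.
 * seq_open_net() allocates a private struct tcp_iter_state per open
 * file, so concurrent readers keep independent iterator positions.
 */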
static int __net_init tcp4_proc_init_net(struct net *net)
{
	if (!proc_create_data("tcp", 0444, net->proc_net,
			      &tcp_afinfo_seq_fops, &tcp4_seq_afinfo))
		return -ENOMEM;
	return 0;
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	remove_proc_entry("tcp", net->proc_net);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */

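/*
 * The IPv4 TCP protocol operations table.  The core socket layer
 * dispatches through these pointers, roughly:
 *
 *	// on sendmsg(2) for a TCP socket:
 *	// err = sk->sk_prot->sendmsg(sk, msg, size);   i.e. tcp_sendmsg()
 *
 * The table is registered from inet_init() via proto_register(&tcp_prot, 1)
 * in net/ipv4/af_inet.c.
 */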
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v4_pre_connect,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);

static void __net_exit tcp_sk_exit(struct net *net)
{
	int cpu;

	module_put(net->ipv4.tcp_congestion_control->owner);

	for_each_possible_cpu(cpu)
		inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
	free_percpu(net->ipv4.tcp_sk);
}

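/*
 * Per-namespace TCP setup: create one control socket per possible CPU
 * (used to transmit RSTs and ACKs on behalf of sockets we do not own)
 * and seed the per-namespace net.ipv4.tcp_* tunables with their
 * defaults.
 */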
static int __net_init tcp_sk_init(struct net *net)
{
	int res, cpu, cnt;

	net->ipv4.tcp_sk = alloc_percpu(struct sock *);
	if (!net->ipv4.tcp_sk)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct sock *sk;

		res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
					   IPPROTO_TCP, net);
		if (res)
			goto fail;
		sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
		*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
	}

	net->ipv4.sysctl_tcp_ecn = 2;
	net->ipv4.sysctl_tcp_ecn_fallback = 1;

	net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
	net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
	net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

	net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
	net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
	net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

	net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
	net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
	net->ipv4.sysctl_tcp_syncookies = 1;
	net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
	net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
	net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
	net->ipv4.sysctl_tcp_orphan_retries = 0;
	net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
	net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
	net->ipv4.sysctl_tcp_tw_reuse = 0;

	cnt = tcp_hashinfo.ehash_mask + 1;
	net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
	net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

	net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);
	net->ipv4.sysctl_tcp_sack = 1;
	net->ipv4.sysctl_tcp_window_scaling = 1;
	net->ipv4.sysctl_tcp_timestamps = 1;
	net->ipv4.sysctl_tcp_early_retrans = 3;
	net->ipv4.sysctl_tcp_recovery = TCP_RACK_LOSS_DETECTION;
	net->ipv4.sysctl_tcp_slow_start_after_idle = 1; /* By default, RFC2861 behavior. */
	net->ipv4.sysctl_tcp_retrans_collapse = 1;
	net->ipv4.sysctl_tcp_max_reordering = 300;
	net->ipv4.sysctl_tcp_dsack = 1;
	net->ipv4.sysctl_tcp_app_win = 31;
	net->ipv4.sysctl_tcp_adv_win_scale = 1;
	net->ipv4.sysctl_tcp_frto = 2;
	net->ipv4.sysctl_tcp_moderate_rcvbuf = 1;
	/* This limits the percentage of the congestion window which we
	 * will allow a single TSO frame to consume.  Building TSO frames
	 * which are too large can cause TCP streams to be bursty.
	 */
	net->ipv4.sysctl_tcp_tso_win_divisor = 3;
	/* Default TSQ limit of four TSO segments */
	net->ipv4.sysctl_tcp_limit_output_bytes = 262144;
	/* rfc5961 challenge ack rate limiting */
	net->ipv4.sysctl_tcp_challenge_ack_limit = 1000;
	net->ipv4.sysctl_tcp_min_tso_segs = 2;
	net->ipv4.sysctl_tcp_min_rtt_wlen = 300;
	net->ipv4.sysctl_tcp_autocorking = 1;
	net->ipv4.sysctl_tcp_invalid_ratelimit = HZ/2;
	net->ipv4.sysctl_tcp_pacing_ss_ratio = 200;
	net->ipv4.sysctl_tcp_pacing_ca_ratio = 120;
	if (net != &init_net) {
		memcpy(net->ipv4.sysctl_tcp_rmem,
		       init_net.ipv4.sysctl_tcp_rmem,
		       sizeof(init_net.ipv4.sysctl_tcp_rmem));
		memcpy(net->ipv4.sysctl_tcp_wmem,
		       init_net.ipv4.sysctl_tcp_wmem,
		       sizeof(init_net.ipv4.sysctl_tcp_wmem));
	}
	net->ipv4.sysctl_tcp_fastopen = TFO_CLIENT_ENABLE;
	spin_lock_init(&net->ipv4.tcp_fastopen_ctx_lock);
	net->ipv4.sysctl_tcp_fastopen_blackhole_timeout = 60 * 60;
	atomic_set(&net->ipv4.tfo_active_disable_times, 0);

	/* Reno is always built in */
	if (!net_eq(net, &init_net) &&
	    try_module_get(init_net.ipv4.tcp_congestion_control->owner))
		net->ipv4.tcp_congestion_control = init_net.ipv4.tcp_congestion_control;
	else
		net->ipv4.tcp_congestion_control = &tcp_reno;

	return 0;
fail:
	tcp_sk_exit(net);

	return res;
}

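/*
 * Illustrative sketch (not part of this file): the defaults assigned
 * above surface through sysctl, so e.g. the tcp_fin_timeout value can
 * be read back (in seconds) from within any namespace:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/proc/sys/net/ipv4/tcp_fin_timeout", "r");
 *		int secs;
 *
 *		if (f && fscanf(f, "%d", &secs) == 1)
 *			printf("FIN-WAIT-2 timeout: %d seconds\n", secs);
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */
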
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	inet_twsk_purge(&tcp_hashinfo, AF_INET);

	list_for_each_entry(net, net_exit_list, exit_list)
		tcp_fastopen_ctx_destroy(net);
}

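/*
 * Namespace lifecycle hooks: ->init and ->exit run once per namespace,
 * while ->exit_batch runs once for a whole batch of dying namespaces so
 * inet_twsk_purge() only has to sweep the global ehash a single time.
 */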
static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}