/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Support for INET connection oriented protocols.
 *
 * Authors:     See the TCP sources
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>
#include <net/tcp.h>
#include <net/sock_reuseport.h>
#include <net/addrconf.h>

#if IS_ENABLED(CONFIG_IPV6)
/* match_wildcard == true:  IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
 *                          only, and any IPv4 addresses if not IPv6 only
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
 *                          and 0.0.0.0 equals to 0.0.0.0 only
 */
static bool ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
                                 const struct in6_addr *sk2_rcv_saddr6,
                                 __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk1_ipv6only, bool sk2_ipv6only,
                                 bool match_wildcard)
{
        int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;

        /* if both are mapped, treat as IPv4 */
        if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
                if (!sk2_ipv6only) {
                        if (sk1_rcv_saddr == sk2_rcv_saddr)
                                return true;
                        if (!sk1_rcv_saddr || !sk2_rcv_saddr)
                                return match_wildcard;
                }
                return false;
        }

        if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
                return true;

        if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
            !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
                return true;

        if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
            !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
                return true;

        if (sk2_rcv_saddr6 &&
            ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
                return true;

        return false;
}
#endif

/* match_wildcard == true:  0.0.0.0 equals to any IPv4 addresses
 * match_wildcard == false: addresses must be exactly the same, i.e.
 *                          0.0.0.0 only equals to 0.0.0.0
 */
static bool ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
                                 bool sk2_ipv6only, bool match_wildcard)
{
        if (!sk2_ipv6only) {
                if (sk1_rcv_saddr == sk2_rcv_saddr)
                        return true;
                if (!sk1_rcv_saddr || !sk2_rcv_saddr)
                        return match_wildcard;
        }
        return false;
}

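/* Illustrative truth table (a sketch, not part of the original file): for
 * two IPv4 sockets where sk2 is not IPv6-only, the rules above give:
 *
 *      ipv4_rcv_saddr_equal(10.0.0.1, 10.0.0.1, false, false) -> true
 *      ipv4_rcv_saddr_equal(10.0.0.1, 10.0.0.2, false, false) -> false
 *      ipv4_rcv_saddr_equal(0.0.0.0,  10.0.0.2, false, true)  -> true
 *      ipv4_rcv_saddr_equal(0.0.0.0,  10.0.0.2, false, false) -> false
 *
 * (addresses written as dotted quads for readability; the arguments are
 * really __be32).  A wildcard only "equals" a specific address when the
 * caller opted into wildcard matching, as the bind-conflict checks do.
 */
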
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
                          bool match_wildcard)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
                return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
                                            inet6_rcv_saddr(sk2),
                                            sk->sk_rcv_saddr,
                                            sk2->sk_rcv_saddr,
                                            ipv6_only_sock(sk),
                                            ipv6_only_sock(sk2),
                                            match_wildcard);
#endif
        return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
                                    ipv6_only_sock(sk2), match_wildcard);
}
EXPORT_SYMBOL(inet_rcv_saddr_equal);

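/* A minimal usage sketch (an assumption, not code from this file): callers
 * typically compare a socket about to be bound against an existing owner of
 * the same port, letting wildcards collide:
 *
 *      if (inet_rcv_saddr_equal(sk, sk2, true))
 *              ;       // same address space -> potential bind conflict
 *
 * Passing match_wildcard == false instead demands exact address equality.
 */
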
bool inet_rcv_saddr_any(const struct sock *sk)
{
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == AF_INET6)
                return ipv6_addr_any(&sk->sk_v6_rcv_saddr);
#endif
        return !sk->sk_rcv_saddr;
}

void inet_get_local_port_range(struct net *net, int *low, int *high)
{
        unsigned int seq;

        do {
                seq = read_seqbegin(&net->ipv4.ip_local_ports.lock);

                *low = net->ipv4.ip_local_ports.range[0];
                *high = net->ipv4.ip_local_ports.range[1];
        } while (read_seqretry(&net->ipv4.ip_local_ports.lock, seq));
}
EXPORT_SYMBOL(inet_get_local_port_range);

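/* The retry loop above is the reader side of a seqlock; a sketch of the
 * matching writer (assumed shape; the real one lives with the
 * ip_local_port_range sysctl handler):
 *
 *      write_seqlock(&net->ipv4.ip_local_ports.lock);
 *      net->ipv4.ip_local_ports.range[0] = low;
 *      net->ipv4.ip_local_ports.range[1] = high;
 *      write_sequnlock(&net->ipv4.ip_local_ports.lock);
 *
 * Readers retry until they observe a consistent (low, high) pair, so a
 * half-updated range is never seen.
 */
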
static int inet_csk_bind_conflict(const struct sock *sk,
                                  const struct inet_bind_bucket *tb,
                                  bool relax, bool reuseport_ok)
{
        struct sock *sk2;
        bool reuse = sk->sk_reuse;
        bool reuseport = !!sk->sk_reuseport && reuseport_ok;
        kuid_t uid = sock_i_uid((struct sock *)sk);

        /*
         * Unlike other sk lookup places we do not check
         * for sk_net here, since _all_ the socks listed
         * in tb->owners list belong to the same net - the
         * one this bucket belongs to.
         */

        sk_for_each_bound(sk2, &tb->owners) {
                if (sk != sk2 &&
                    (!sk->sk_bound_dev_if ||
                     !sk2->sk_bound_dev_if ||
                     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
                        if ((!reuse || !sk2->sk_reuse ||
                            sk2->sk_state == TCP_LISTEN) &&
                            (!reuseport || !sk2->sk_reuseport ||
                            rcu_access_pointer(sk->sk_reuseport_cb) ||
                            (sk2->sk_state != TCP_TIME_WAIT &&
                             !uid_eq(uid, sock_i_uid(sk2))))) {
                                if (inet_rcv_saddr_equal(sk, sk2, true))
                                        break;
                        }
                        if (!relax && reuse && sk2->sk_reuse &&
                            sk2->sk_state != TCP_LISTEN) {
                                if (inet_rcv_saddr_equal(sk, sk2, true))
                                        break;
                        }
                }
        }
        return sk2 != NULL;
}

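/* Worked example (illustrative, assuming matching rcv_saddr, compatible
 * bound devices, and only the named option set on each socket):
 *
 *      sk1 SO_REUSEADDR + sk2 SO_REUSEADDR, sk2 not listening -> no conflict
 *      sk1 SO_REUSEADDR + sk2 SO_REUSEADDR, sk2 == TCP_LISTEN -> conflict
 *      sk1 SO_REUSEPORT + sk2 SO_REUSEPORT, same uid          -> no conflict
 *      sk1 SO_REUSEPORT + sk2 SO_REUSEPORT, different uid     -> conflict
 *
 * The uid check keeps an unrelated user from quietly joining a reuseport
 * group and stealing part of its traffic.
 */
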
/*
 * Find an open port number for the socket.  Returns with the
 * inet_bind_hashbucket lock held.
 */
static struct inet_bind_hashbucket *
inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
{
        struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
        int port = 0;
        struct inet_bind_hashbucket *head;
        struct net *net = sock_net(sk);
        int i, low, high, attempt_half;
        struct inet_bind_bucket *tb;
        u32 remaining, offset;
        int l3mdev;

        l3mdev = inet_sk_bound_l3mdev(sk);
        attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
        inet_get_local_port_range(net, &low, &high);
        high++; /* [32768, 60999] -> [32768, 61000[ */
        if (high - low < 4)
                attempt_half = 0;
        if (attempt_half) {
                int half = low + (((high - low) >> 2) << 1);

                if (attempt_half == 1)
                        high = half;
                else
                        low = half;
        }
        remaining = high - low;
        if (likely(remaining > 1))
                remaining &= ~1U;

        offset = prandom_u32() % remaining;
        /* __inet_hash_connect() favors ports having @low parity
         * We do the opposite to not pollute connect() users.
         */
        offset |= 1U;

other_parity_scan:
        port = low + offset;
        for (i = 0; i < remaining; i += 2, port += 2) {
                if (unlikely(port >= high))
                        port -= remaining;
                if (inet_is_local_reserved_port(net, port))
                        continue;
                head = &hinfo->bhash[inet_bhashfn(net, port,
                                                  hinfo->bhash_size)];
                spin_lock_bh(&head->lock);
                inet_bind_bucket_for_each(tb, &head->chain)
                        if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
                            tb->port == port) {
                                if (!inet_csk_bind_conflict(sk, tb, false, false))
                                        goto success;
                                goto next_port;
                        }
                tb = NULL;
                goto success;
next_port:
                spin_unlock_bh(&head->lock);
                cond_resched();
        }

        offset--;
        if (!(offset & 1))
                goto other_parity_scan;

        if (attempt_half == 1) {
                /* OK we now try the upper half of the range */
                attempt_half = 2;
                goto other_half_scan;
        }
        return NULL;
success:
        *port_ret = port;
        *tb_ret = tb;
        return head;
}

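/* Worked example of the scan above (illustrative): with the default range
 * [32768, 61000[ , remaining = 28232 (already even).  If
 * prandom_u32() % remaining yields 1000, "offset |= 1U" turns it into 1001,
 * so the first pass probes odd ports 33769, 33771, ... wrapping past 61000
 * back to 32769.  Only once every odd port is exhausted does "offset--"
 * flip the parity and scan the even ports that connect() prefers.
 */
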
static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
                                     struct sock *sk)
{
        kuid_t uid = sock_i_uid(sk);

        if (tb->fastreuseport <= 0)
                return 0;
        if (!sk->sk_reuseport)
                return 0;
        if (rcu_access_pointer(sk->sk_reuseport_cb))
                return 0;
        if (!uid_eq(tb->fastuid, uid))
                return 0;
        /* We only need to check the rcv_saddr if this tb was once marked
         * without fastreuseport and then was reset, as we can only know that
         * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
         * owners list.
         */
        if (tb->fastreuseport == FASTREUSEPORT_ANY)
                return 1;
#if IS_ENABLED(CONFIG_IPV6)
        if (tb->fast_sk_family == AF_INET6)
                return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
                                            inet6_rcv_saddr(sk),
                                            tb->fast_rcv_saddr,
                                            sk->sk_rcv_saddr,
                                            tb->fast_ipv6_only,
                                            ipv6_only_sock(sk), true);
#endif
        return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
                                    ipv6_only_sock(sk), true);
}

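/* Sketch of the intent (illustrative): the tb->fast* fields cache the first
 * reuseport binder so subsequent binds can skip walking tb->owners:
 *
 *      first bind:     tb->fastreuseport = FASTREUSEPORT_ANY;
 *                      tb->fastuid = uid;
 *      later binds:    sk_reuseport_match(tb, sk) -> 1, no conflict scan
 *
 * Only after the bucket has been demoted to FASTREUSEPORT_STRICT does the
 * cached fast_rcv_saddr have to be compared against the new socket.
 */
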
/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 * We try to allocate an odd port (and leave even ports for connect())
 */
int inet_csk_get_port(struct sock *sk, unsigned short snum)
{
        bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
        struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
        int ret = 1, port = snum;
        struct inet_bind_hashbucket *head;
        struct net *net = sock_net(sk);
        struct inet_bind_bucket *tb = NULL;
        kuid_t uid = sock_i_uid(sk);
        int l3mdev;

        l3mdev = inet_sk_bound_l3mdev(sk);

        if (!port) {
                head = inet_csk_find_open_port(sk, &tb, &port);
                if (!head)
                        return ret;
                if (!tb)
                        goto tb_not_found;
                goto success;
        }
        head = &hinfo->bhash[inet_bhashfn(net, port,
                                          hinfo->bhash_size)];
        spin_lock_bh(&head->lock);
        inet_bind_bucket_for_each(tb, &head->chain)
                if (net_eq(ib_net(tb), net) && tb->l3mdev == l3mdev &&
                    tb->port == port)
                        goto tb_found;
tb_not_found:
        tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
                                     net, head, port, l3mdev);
        if (!tb)
                goto fail_unlock;
tb_found:
        if (!hlist_empty(&tb->owners)) {
                if (sk->sk_reuse == SK_FORCE_REUSE)
                        goto success;

                if ((tb->fastreuse > 0 && reuse) ||
                    sk_reuseport_match(tb, sk))
                        goto success;
                if (inet_csk_bind_conflict(sk, tb, true, true))
                        goto fail_unlock;
        }
success:
        if (hlist_empty(&tb->owners)) {
                tb->fastreuse = reuse;
                if (sk->sk_reuseport) {
                        tb->fastreuseport = FASTREUSEPORT_ANY;
                        tb->fastuid = uid;
                        tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                        tb->fast_ipv6_only = ipv6_only_sock(sk);
                        tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
                        tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
                } else {
                        tb->fastreuseport = 0;
                }
        } else {
                if (!reuse)
                        tb->fastreuse = 0;
                if (sk->sk_reuseport) {
                        /* We didn't match or we don't have fastreuseport set on
                         * the tb, but we have sk_reuseport set on this socket
                         * and we know that there are no bind conflicts with
                         * this socket in this tb, so reset our tb's reuseport
                         * settings so that any subsequent sockets that match
                         * our current socket will be put on the fast path.
                         *
                         * If we reset we need to set FASTREUSEPORT_STRICT so we
                         * do extra checking for all subsequent sk_reuseport
                         * socks.
                         */
                        if (!sk_reuseport_match(tb, sk)) {
                                tb->fastreuseport = FASTREUSEPORT_STRICT;
                                tb->fastuid = uid;
                                tb->fast_rcv_saddr = sk->sk_rcv_saddr;
                                tb->fast_ipv6_only = ipv6_only_sock(sk);
                                tb->fast_sk_family = sk->sk_family;
#if IS_ENABLED(CONFIG_IPV6)
                                tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
#endif
                        }
                } else {
                        tb->fastreuseport = 0;
                }
        }
        if (!inet_csk(sk)->icsk_bind_hash)
                inet_bind_hash(sk, tb, port);
        WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
        ret = 0;

fail_unlock:
        spin_unlock_bh(&head->lock);
        return ret;
}
EXPORT_SYMBOL_GPL(inet_csk_get_port);

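/* What this buys userspace (illustrative sketch, error handling elided):
 * two processes that both set SO_REUSEPORT before bind() share one
 * inet_bind_bucket and pass the checks above:
 *
 *      int fd = socket(AF_INET, SOCK_STREAM, 0);
 *      int one = 1;
 *
 *      setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *      bind(fd, (struct sockaddr *)&addr, sizeof(addr));  // -> inet_csk_get_port()
 *      listen(fd, 128);
 *
 * Binding with port 0 instead goes through inet_csk_find_open_port().
 */
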
/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        DEFINE_WAIT(wait);
        int err;

        /*
         * True wake-one mechanism for incoming connections: only
         * one process gets woken up, not the 'whole herd'.
         * Since we do not 'race & poll' for established sockets
         * anymore, the common case will execute the loop only once.
         *
         * Subtle issue: "add_wait_queue_exclusive()" will be added
         * after any current non-exclusive waiters, and we know that
         * it will always _stay_ after any new non-exclusive waiters
         * because all non-exclusive waiters are added at the
         * beginning of the wait-queue. As such, it's ok to "drop"
         * our exclusiveness temporarily when we get woken up without
         * having to remove and re-insert us on the wait queue.
         */
        for (;;) {
                prepare_to_wait_exclusive(sk_sleep(sk), &wait,
                                          TASK_INTERRUPTIBLE);
                release_sock(sk);
                if (reqsk_queue_empty(&icsk->icsk_accept_queue))
                        timeo = schedule_timeout(timeo);
                sched_annotate_sleep();
                lock_sock(sk);
                err = 0;
                if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
                        break;
                err = -EINVAL;
                if (sk->sk_state != TCP_LISTEN)
                        break;
                err = sock_intr_errno(timeo);
                if (signal_pending(current))
                        break;
                err = -EAGAIN;
                if (!timeo)
                        break;
        }
        finish_wait(sk_sleep(sk), &wait);
        return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err, bool kern)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *req;
        struct sock *newsk;
        int error;

        lock_sock(sk);

        /* We need to make sure that this socket is listening,
         * and that it has something pending.
         */
        error = -EINVAL;
        if (sk->sk_state != TCP_LISTEN)
                goto out_err;

        /* Find already established connection */
        if (reqsk_queue_empty(queue)) {
                long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

                /* If this is a non blocking socket don't sleep */
                error = -EAGAIN;
                if (!timeo)
                        goto out_err;

                error = inet_csk_wait_for_connect(sk, timeo);
                if (error)
                        goto out_err;
        }
        req = reqsk_queue_remove(queue, sk);
        newsk = req->sk;

        if (sk->sk_protocol == IPPROTO_TCP &&
            tcp_rsk(req)->tfo_listener) {
                spin_lock_bh(&queue->fastopenq.lock);
                if (tcp_rsk(req)->tfo_listener) {
                        /* We are still waiting for the final ACK from 3WHS
                         * so can't free req now. Instead, we set req->sk to
                         * NULL to signify that the child socket is taken
                         * so reqsk_fastopen_remove() will free the req
                         * when 3WHS finishes (or is aborted).
                         */
                        req->sk = NULL;
                        req = NULL;
                }
                spin_unlock_bh(&queue->fastopenq.lock);
        }
out:
        release_sock(sk);
        if (req)
                reqsk_put(req);
        return newsk;
out_err:
        newsk = NULL;
        req = NULL;
        *err = error;
        goto out;
}
EXPORT_SYMBOL(inet_csk_accept);

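/* Userspace view of the above (illustrative): on a non-blocking listener an
 * empty accept queue surfaces as EAGAIN instead of sleeping in
 * inet_csk_wait_for_connect():
 *
 *      fcntl(lfd, F_SETFL, O_NONBLOCK);
 *      int cfd = accept(lfd, NULL, NULL);
 *      if (cfd < 0 && errno == EAGAIN)
 *              ;       // nothing established yet; poll() and retry
 */
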
/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
                               void (*retransmit_handler)(struct timer_list *t),
                               void (*delack_handler)(struct timer_list *t),
                               void (*keepalive_handler)(struct timer_list *t))
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        timer_setup(&icsk->icsk_retransmit_timer, retransmit_handler, 0);
        timer_setup(&icsk->icsk_delack_timer, delack_handler, 0);
        timer_setup(&sk->sk_timer, keepalive_handler, 0);
        icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}
EXPORT_SYMBOL(inet_csk_init_xmit_timers);

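/* Minimal handler sketch for the timer_list API used above (assumed names;
 * TCP's real handlers live in net/ipv4/tcp_timer.c):
 *
 *      static void example_retransmit_timer(struct timer_list *t)
 *      {
 *              struct inet_connection_sock *icsk =
 *                      from_timer(icsk, t, icsk_retransmit_timer);
 *              struct sock *sk = &icsk->icsk_inet.sk;
 *
 *              // bh-safe retransmit work against sk goes here
 *      }
 *
 * from_timer() recovers the enclosing structure from the embedded
 * struct timer_list that timer_setup() initialized.
 */
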
void inet_csk_clear_xmit_timers(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        sk_stop_timer(sk, &icsk->icsk_delack_timer);
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
        sk_stop_timer(sk, &sk->sk_timer);
}
EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
        sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}
EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(const struct sock *sk,
                                     struct flowi4 *fl4,
                                     const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
        struct ip_options_rcu *opt;
        struct rtable *rt;

        rcu_read_lock();
        opt = rcu_dereference(ireq->ireq_opt);

        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num), sk->sk_uid);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        rcu_read_unlock();
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        rcu_read_unlock();
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_req);

struct dst_entry *inet_csk_route_child_sock(const struct sock *sk,
                                            struct sock *newsk,
                                            const struct request_sock *req)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        struct net *net = read_pnet(&ireq->ireq_net);
        struct inet_sock *newinet = inet_sk(newsk);
        struct ip_options_rcu *opt;
        struct flowi4 *fl4;
        struct rtable *rt;

        opt = rcu_dereference(ireq->ireq_opt);
        fl4 = &newinet->cork.fl.u.ip4;

        flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
                           RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
                           sk->sk_protocol, inet_sk_flowi_flags(sk),
                           (opt && opt->opt.srr) ? opt->opt.faddr : ireq->ir_rmt_addr,
                           ireq->ir_loc_addr, ireq->ir_rmt_port,
                           htons(ireq->ir_num), sk->sk_uid);
        security_req_classify_flow(req, flowi4_to_flowi(fl4));
        rt = ip_route_output_flow(net, fl4, sk);
        if (IS_ERR(rt))
                goto no_route;
        if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
                goto route_err;
        return &rt->dst;

route_err:
        ip_rt_put(rt);
no_route:
        __IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
        return NULL;
}
EXPORT_SYMBOL_GPL(inet_csk_route_child_sock);

#if IS_ENABLED(CONFIG_IPV6)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) true
#endif

/* Decide when to expire the request and when to resend SYN-ACK */
static inline void syn_ack_recalc(struct request_sock *req, const int thresh,
                                  const int max_retries,
                                  const u8 rskq_defer_accept,
                                  int *expire, int *resend)
{
        if (!rskq_defer_accept) {
                *expire = req->num_timeout >= thresh;
                *resend = 1;
                return;
        }
        *expire = req->num_timeout >= thresh &&
                  (!inet_rsk(req)->acked || req->num_timeout >= max_retries);
        /*
         * Do not resend while waiting for data after ACK,
         * start to resend on end of deferring period to give
         * last chance for data or ACK to create established socket.
         */
        *resend = !inet_rsk(req)->acked ||
                  req->num_timeout >= rskq_defer_accept - 1;
}

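/* Worked example (illustrative): with thresh = 5 and TCP_DEFER_ACCEPT
 * giving rskq_defer_accept = 8 (so max_retries = 8), a request whose
 * SYN-ACK has been acked but that still carries no data behaves as:
 *
 *      num_timeout = 5 -> expire = 0 (acked, 5 < 8), resend = 0 (5 < 7)
 *      num_timeout = 7 -> expire = 0,                resend = 1 (last chance)
 *      num_timeout = 8 -> expire = 1 (8 >= 8)
 *
 * Without defer_accept the request simply expires once
 * num_timeout >= thresh.
 */
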
int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req)
{
        int err = req->rsk_ops->rtx_syn_ack(parent, req);

        if (!err)
                req->num_retrans++;
        return err;
}
EXPORT_SYMBOL(inet_rtx_syn_ack);

/* return true if req was found in the ehash table */
static bool reqsk_queue_unlink(struct request_sock_queue *queue,
                               struct request_sock *req)
{
        struct inet_hashinfo *hashinfo = req_to_sk(req)->sk_prot->h.hashinfo;
        bool found = false;

        if (sk_hashed(req_to_sk(req))) {
                spinlock_t *lock = inet_ehash_lockp(hashinfo, req->rsk_hash);

                spin_lock(lock);
                found = __sk_nulls_del_node_init_rcu(req_to_sk(req));
                spin_unlock(lock);
        }
        if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer))
                reqsk_put(req);
        return found;
}

void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
{
        if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
                reqsk_put(req);
        }
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);

void inet_csk_reqsk_queue_drop_and_put(struct sock *sk, struct request_sock *req)
{
        inet_csk_reqsk_queue_drop(sk, req);
        reqsk_put(req);
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_drop_and_put);

static void reqsk_timer_handler(struct timer_list *t)
{
        struct request_sock *req = from_timer(req, t, rsk_timer);
        struct sock *sk_listener = req->rsk_listener;
        struct net *net = sock_net(sk_listener);
        struct inet_connection_sock *icsk = inet_csk(sk_listener);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        int qlen, expire = 0, resend = 0;
        int max_retries, thresh;
        u8 defer_accept;

        if (inet_sk_state_load(sk_listener) != TCP_LISTEN)
                goto drop;

        max_retries = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_synack_retries;
        thresh = max_retries;
        /* Normally all the openreqs are young and become mature
         * (i.e. converted to established socket) for first timeout.
         * If synack was not acknowledged for 1 second, it means
         * one of the following things: synack was lost, ack was lost,
         * rtt is high or nobody planned to ack (i.e. synflood).
         * When server is a bit loaded, queue is populated with old
         * open requests, reducing effective size of queue.
         * When server is well loaded, queue size reduces to zero
         * after several minutes of work. It is not synflood,
         * it is normal operation. The solution is pruning
         * too old entries overriding normal timeout, when
         * situation becomes dangerous.
         *
         * Essentially, we reserve half of room for young
         * embryos; and abort old ones without pity, if old
         * ones are about to clog our table.
         */
        qlen = reqsk_queue_len(queue);
        if ((qlen << 1) > max(8U, sk_listener->sk_max_ack_backlog)) {
                int young = reqsk_queue_len_young(queue) << 1;

                while (thresh > 2) {
                        if (qlen < young)
                                break;
                        thresh--;
                        young <<= 1;
                }
        }
        defer_accept = READ_ONCE(queue->rskq_defer_accept);
        if (defer_accept)
                max_retries = defer_accept;
        syn_ack_recalc(req, thresh, max_retries, defer_accept,
                       &expire, &resend);
        req->rsk_ops->syn_ack_timeout(req);
        if (!expire &&
            (!resend ||
             !inet_rtx_syn_ack(sk_listener, req) ||
             inet_rsk(req)->acked)) {
                unsigned long timeo;

                if (req->num_timeout++ == 0)
                        atomic_dec(&queue->young);
                timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
                mod_timer(&req->rsk_timer, jiffies + timeo);
                return;
        }
drop:
        inet_csk_reqsk_queue_drop_and_put(sk_listener, req);
}

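/* Worked example of the pruning above (illustrative): with
 * sk_max_ack_backlog = 128 and sysctl_tcp_synack_retries = 5, the threshold
 * only starts dropping once qlen exceeds 64:
 *
 *      qlen = 80, young = 16:  young<<1 = 32; 80 >= 32 -> thresh = 4,
 *                              80 >= 64 -> thresh = 3; 80 < 128 -> stop
 *
 * so old requests are expired after 3 timeouts instead of 5, freeing room
 * for young embryos while the queue is under pressure.
 */
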
static void reqsk_queue_hash_req(struct request_sock *req,
                                 unsigned long timeout)
{
        req->num_retrans = 0;
        req->num_timeout = 0;
        req->sk = NULL;

        timer_setup(&req->rsk_timer, reqsk_timer_handler, TIMER_PINNED);
        mod_timer(&req->rsk_timer, jiffies + timeout);

        inet_ehash_insert(req_to_sk(req), NULL);
        /* before letting lookups find us, make sure all req fields
         * are committed to memory and refcnt initialized.
         */
        smp_wmb();
        refcount_set(&req->rsk_refcnt, 2 + 1);
}

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
                                   unsigned long timeout)
{
        reqsk_queue_hash_req(req, timeout);
        inet_csk_reqsk_queue_added(sk);
}
EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

/**
 * inet_csk_clone_lock - clone an inet socket, and lock its clone
 * @sk: the socket to clone
 * @req: request_sock
 * @priority: for allocation (%GFP_KERNEL, %GFP_ATOMIC, etc)
 *
 * Caller must unlock socket even in error path (bh_unlock_sock(newsk))
 */
struct sock *inet_csk_clone_lock(const struct sock *sk,
                                 const struct request_sock *req,
                                 const gfp_t priority)
{
        struct sock *newsk = sk_clone_lock(sk, priority);

        if (newsk) {
                struct inet_connection_sock *newicsk = inet_csk(newsk);

                inet_sk_set_state(newsk, TCP_SYN_RECV);
                newicsk->icsk_bind_hash = NULL;

                inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port;
                inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num;
                inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num);

                /* listeners have SOCK_RCU_FREE, not the children */
                sock_reset_flag(newsk, SOCK_RCU_FREE);

                inet_sk(newsk)->mc_list = NULL;

                newsk->sk_mark = inet_rsk(req)->ir_mark;
                atomic64_set(&newsk->sk_cookie,
                             atomic64_read(&inet_rsk(req)->ir_cookie));

                newicsk->icsk_retransmits = 0;
                newicsk->icsk_backoff = 0;
                newicsk->icsk_probes_out = 0;

                /* Deinitialize accept_queue to trap illegal accesses. */
                memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

                security_inet_csk_clone(newsk, req);
        }
        return newsk;
}
EXPORT_SYMBOL_GPL(inet_csk_clone_lock);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
        WARN_ON(sk->sk_state != TCP_CLOSE);
        WARN_ON(!sock_flag(sk, SOCK_DEAD));

        /* It cannot be in hash table! */
        WARN_ON(!sk_unhashed(sk));

        /* If it has a non-zero inet_sk(sk)->inet_num, it must be bound */
        WARN_ON(inet_sk(sk)->inet_num && !inet_csk(sk)->icsk_bind_hash);

        sk->sk_prot->destroy(sk);

        sk_stream_kill_queues(sk);

        xfrm_sk_free_policy(sk);

        sk_refcnt_debug_release(sk);

        percpu_counter_dec(sk->sk_prot->orphan_count);

        sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);

/* This function allows to force a closure of a socket after the call to
 * tcp/dccp_create_openreq_child().
 */
void inet_csk_prepare_forced_close(struct sock *sk)
        __releases(&sk->sk_lock.slock)
{
        /* sk_clone_lock locked the socket and set refcnt to 2 */
        bh_unlock_sock(sk);
        sock_put(sk);

        /* The below has to be done to allow calling inet_csk_destroy_sock */
        sock_set_flag(sk, SOCK_DEAD);
        percpu_counter_inc(sk->sk_prot->orphan_count);
        inet_sk(sk)->inet_num = 0;
}
EXPORT_SYMBOL(inet_csk_prepare_forced_close);

int inet_csk_listen_start(struct sock *sk, int backlog)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct inet_sock *inet = inet_sk(sk);
        int err = -EADDRINUSE;

        reqsk_queue_alloc(&icsk->icsk_accept_queue);

        sk->sk_ack_backlog = 0;
        inet_csk_delack_init(sk);

        /* There is race window here: we announce ourselves listening,
         * but this transition is still not validated by get_port().
         * It is OK, because this socket enters to hash table only
         * after validation is complete.
         */
        inet_sk_state_store(sk, TCP_LISTEN);
        if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
                inet->inet_sport = htons(inet->inet_num);

                sk_dst_reset(sk);
                err = sk->sk_prot->hash(sk);

                if (likely(!err))
                        return 0;
        }

        inet_sk_set_state(sk, TCP_CLOSE);
        return err;
}
EXPORT_SYMBOL_GPL(inet_csk_listen_start);

static void inet_child_forget(struct sock *sk, struct request_sock *req,
                              struct sock *child)
{
        sk->sk_prot->disconnect(child, O_NONBLOCK);

        sock_orphan(child);

        percpu_counter_inc(sk->sk_prot->orphan_count);

        if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) {
                BUG_ON(tcp_sk(child)->fastopen_rsk != req);
                BUG_ON(sk != req->rsk_listener);

                /* Paranoid, to prevent race condition if
                 * an inbound pkt destined for child is
                 * blocked by sock lock in tcp_v4_rcv().
                 * Also to satisfy an assertion in
                 * tcp_v4_destroy_sock().
                 */
                tcp_sk(child)->fastopen_rsk = NULL;
        }
        inet_csk_destroy_sock(child);
}

struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
                                      struct request_sock *req,
                                      struct sock *child)
{
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;

        spin_lock(&queue->rskq_lock);
        if (unlikely(sk->sk_state != TCP_LISTEN)) {
                inet_child_forget(sk, req, child);
                child = NULL;
        } else {
                req->sk = child;
                req->dl_next = NULL;
                if (queue->rskq_accept_head == NULL)
                        queue->rskq_accept_head = req;
                else
                        queue->rskq_accept_tail->dl_next = req;
                queue->rskq_accept_tail = req;
                sk_acceptq_added(sk);
        }
        spin_unlock(&queue->rskq_lock);
        return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);

struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
                                         struct request_sock *req, bool own_req)
{
        if (own_req) {
                inet_csk_reqsk_queue_drop(sk, req);
                reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
                if (inet_csk_reqsk_queue_add(sk, req, child))
                        return child;
        }
        /* Too bad, another child took ownership of the request, undo. */
        bh_unlock_sock(child);
        sock_put(child);
        return NULL;
}
EXPORT_SYMBOL(inet_csk_complete_hashdance);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct request_sock_queue *queue = &icsk->icsk_accept_queue;
        struct request_sock *next, *req;

        /* Following specs, it would be better either to send FIN
         * (and enter FIN-WAIT-1, it is normal close)
         * or to send active reset (abort).
         * Certainly, it is pretty dangerous while synflood, but it is
         * bad justification for our negligence 8)
         * To be honest, we are not able to make either
         * of the variants now.                 --ANK
         */
        while ((req = reqsk_queue_remove(queue, sk)) != NULL) {
                struct sock *child = req->sk;

                local_bh_disable();
                bh_lock_sock(child);
                WARN_ON(sock_owned_by_user(child));
                sock_hold(child);

                inet_child_forget(sk, req, child);
                reqsk_put(req);
                bh_unlock_sock(child);
                local_bh_enable();
                sock_put(child);

                cond_resched();
        }
        if (queue->fastopenq.rskq_rst_head) {
                /* Free all the reqs queued in rskq_rst_head. */
                spin_lock_bh(&queue->fastopenq.lock);
                req = queue->fastopenq.rskq_rst_head;
                queue->fastopenq.rskq_rst_head = NULL;
                spin_unlock_bh(&queue->fastopenq.lock);
                while (req != NULL) {
                        next = req->dl_next;
                        reqsk_put(req);
                        req = next;
                }
        }
        WARN_ON_ONCE(sk->sk_ack_backlog);
}
EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
        struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
        const struct inet_sock *inet = inet_sk(sk);

        sin->sin_family = AF_INET;
        sin->sin_addr.s_addr = inet->inet_daddr;
        sin->sin_port = inet->inet_dport;
}
EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, int __user *optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_getsockopt)
                return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
                               char __user *optval, unsigned int optlen)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_af_ops->compat_setsockopt)
                return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
                                                            optval, optlen);
        return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                             optval, optlen);
}
EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif

static struct dst_entry *inet_csk_rebuild_route(struct sock *sk, struct flowi *fl)
{
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;
        struct flowi4 *fl4;
        struct rtable *rt;

        rcu_read_lock();
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
        fl4 = &fl->u.ip4;
        rt = ip_route_output_ports(sock_net(sk), fl4, sk, daddr,
                                   inet->inet_saddr, inet->inet_dport,
                                   inet->inet_sport, sk->sk_protocol,
                                   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
        if (IS_ERR(rt))
                rt = NULL;
        if (rt)
                sk_setup_caps(sk, &rt->dst);
        rcu_read_unlock();

        return &rt->dst;
}

struct dst_entry *inet_csk_update_pmtu(struct sock *sk, u32 mtu)
{
        struct dst_entry *dst = __sk_dst_check(sk, 0);
        struct inet_sock *inet = inet_sk(sk);

        if (!dst) {
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
                if (!dst)
                        goto out;
        }
        dst->ops->update_pmtu(dst, sk, NULL, mtu);

        dst = __sk_dst_check(sk, 0);
        if (!dst)
                dst = inet_csk_rebuild_route(sk, &inet->cork.fl);
out:
        return dst;
}
EXPORT_SYMBOL_GPL(inet_csk_update_pmtu);