blob: eaf09e6b78442e09a17ae40d612864f1915572f7 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09009 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 */
21
Herbert Xueb4dea52008-12-29 23:04:08 -080022#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/jiffies.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/init.h>
34#include <linux/jhash.h>
35#include <linux/ipsec.h>
36#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080038#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
Paolo Abeni0e219ae2019-05-03 17:01:37 +020042#include <linux/indirect_call_wrapper.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/tcp.h>
45#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030046#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080047#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <net/ipv6.h>
49#include <net/transp_v6.h>
50#include <net/addrconf.h>
51#include <net/ip6_route.h>
52#include <net/ip6_checksum.h>
53#include <net/inet_ecn.h>
54#include <net/protocol.h>
55#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/snmp.h>
57#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080058#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070059#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070060#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030061#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063#include <linux/proc_fs.h>
64#include <linux/seq_file.h>
65
Herbert Xucf80e0e2016-01-24 21:20:23 +080066#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080067#include <linux/scatterlist.h>
68
Song Liuc24b14c42017-10-23 09:20:24 -070069#include <trace/events/tcp.h>
70
Eric Dumazeta00e7442015-09-29 07:42:39 -070071static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070073 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
Stephen Hemminger3b401a82009-09-01 19:25:04 +000077static const struct inet_connection_sock_af_ops ipv6_mapped;
Mat Martineau35b2c322020-01-09 07:59:21 -080078const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080079#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +000080static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090082#else
/* Stub used when CONFIG_TCP_MD5SIG is disabled: no MD5 keys can exist,
 * so every lookup misses.  Keeps callers free of #ifdef clutter.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return NULL;
}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Eric Dumazet93a77c12019-03-19 07:01:08 -070091/* Helper returning the inet6 address from a given tcp socket.
92 * It can be used in TCP stack instead of inet6_sk(sk).
93 * This avoids a dereference and allow compiler optimizations.
Eric Dumazetf5d54762019-04-01 03:09:20 -070094 * It is a specialized version of inet6_sk_generic().
Eric Dumazet93a77c12019-03-19 07:01:08 -070095 */
96static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
97{
Eric Dumazetf5d54762019-04-01 03:09:20 -070098 unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
Eric Dumazet93a77c12019-03-19 07:01:08 -070099
Eric Dumazetf5d54762019-04-01 03:09:20 -0700100 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700101}
102
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000103static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104{
105 struct dst_entry *dst = skb_dst(skb);
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000106
Eric Dumazet5037e9e2015-12-14 14:08:53 -0800107 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -0700108 const struct rt6_info *rt = (const struct rt6_info *)dst;
109
Eric Dumazetca777ef2014-09-08 08:06:07 -0700110 sk->sk_rx_dst = dst;
111 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
Eric Dumazet93a77c12019-03-19 07:01:08 -0700112 tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
Eric Dumazetca777ef2014-09-08 08:06:07 -0700113 }
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000114}
115
Eric Dumazet84b114b2017-05-05 06:56:54 -0700116static u32 tcp_v6_init_seq(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117{
Eric Dumazet84b114b2017-05-05 06:56:54 -0700118 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122}
123
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700124static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
Eric Dumazet84b114b2017-05-05 06:56:54 -0700125{
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700126 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700127 ipv6_hdr(skb)->saddr.s6_addr32);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128}
129
/* Pre-connect hook: runs the BPF INET6_CONNECT cgroup program before
 * tcp_v6_connect() proper.  Returns 0 or a negative errno from the
 * BPF verdict.
 */
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	/* Caller must hold the socket lock. */
	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}
144
/* connect() handler for TCP over IPv6.
 *
 * Validates the destination, resolves a route, binds the source
 * address, picks the initial sequence number and sends the SYN.
 * V4-mapped destinations are redirected to the IPv4 code paths by
 * swapping in the ipv6_mapped ops.  Returns 0 or a negative errno;
 * on failure the socket is restored to TCP_CLOSE with dport cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* If the user supplied a flow label, validate that it refers to an
	 * existing label owned by this socket before using it.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: forget the old peer's
	 * timestamp state so PAWS starts fresh.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket over to the v4-mapped ops before
		 * delegating to tcp_v4_connect().
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Roll the ops back so the socket stays a plain
			 * IPv6 socket after the failed v4 attempt.
			 */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	/* Build the flow description and resolve a route. */
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No source address yet: take the one routing selected. */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	/* Pick ISN and timestamp offset, unless repair mode restored them. */
	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TCP Fast Open may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
347
Eric Dumazet563d34d2012-07-23 09:48:52 +0200348static void tcp_v6_mtu_reduced(struct sock *sk)
349{
350 struct dst_entry *dst;
351
352 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
353 return;
354
355 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
356 if (!dst)
357 return;
358
359 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
360 tcp_sync_mss(sk, dst_mtu(dst));
361 tcp_simple_retransmit(sk);
362 }
363}
364
Stefano Brivio32bbd872018-11-08 12:19:21 +0100365static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
Brian Haleyd5fdd6b2009-06-23 04:31:07 -0700366 u8 type, u8 code, int offset, __be32 info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700367{
Weilong Chen4c99aa42013-12-19 18:44:34 +0800368 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -0300369 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
Eric Dumazet22150892015-03-22 10:22:23 -0700370 struct net *net = dev_net(skb->dev);
371 struct request_sock *fastopen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700372 struct ipv6_pinfo *np;
Eric Dumazet22150892015-03-22 10:22:23 -0700373 struct tcp_sock *tp;
374 __u32 seq, snd_una;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 struct sock *sk;
Eric Dumazet9cf74902016-02-02 19:31:12 -0800376 bool fatal;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700377 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700378
Eric Dumazet22150892015-03-22 10:22:23 -0700379 sk = __inet6_lookup_established(net, &tcp_hashinfo,
380 &hdr->daddr, th->dest,
381 &hdr->saddr, ntohs(th->source),
David Ahern4297a0e2017-08-07 08:44:21 -0700382 skb->dev->ifindex, inet6_sdif(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383
Eric Dumazet22150892015-03-22 10:22:23 -0700384 if (!sk) {
Eric Dumazeta16292a2016-04-27 16:44:36 -0700385 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
386 ICMP6_MIB_INERRORS);
Stefano Brivio32bbd872018-11-08 12:19:21 +0100387 return -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388 }
389
390 if (sk->sk_state == TCP_TIME_WAIT) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -0700391 inet_twsk_put(inet_twsk(sk));
Stefano Brivio32bbd872018-11-08 12:19:21 +0100392 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393 }
Eric Dumazet22150892015-03-22 10:22:23 -0700394 seq = ntohl(th->seq);
Eric Dumazet9cf74902016-02-02 19:31:12 -0800395 fatal = icmpv6_err_convert(type, code, &err);
Stefano Brivio32bbd872018-11-08 12:19:21 +0100396 if (sk->sk_state == TCP_NEW_SYN_RECV) {
397 tcp_req_err(sk, seq, fatal);
398 return 0;
399 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400
401 bh_lock_sock(sk);
Eric Dumazet563d34d2012-07-23 09:48:52 +0200402 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700403 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404
405 if (sk->sk_state == TCP_CLOSE)
406 goto out;
407
Eric Dumazet93a77c12019-03-19 07:01:08 -0700408 if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700409 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -0700410 goto out;
411 }
412
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413 tp = tcp_sk(sk);
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700414 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
Eric Dumazetd983ea62019-10-10 20:17:38 -0700415 fastopen = rcu_dereference(tp->fastopen_rsk);
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700416 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 if (sk->sk_state != TCP_LISTEN &&
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700418 !between(seq, snd_una, tp->snd_nxt)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700419 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420 goto out;
421 }
422
Eric Dumazet93a77c12019-03-19 07:01:08 -0700423 np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424
David S. Millerec18d9a2012-07-12 00:25:15 -0700425 if (type == NDISC_REDIRECT) {
Jon Maxwell45caeaa2017-03-10 16:40:33 +1100426 if (!sock_owned_by_user(sk)) {
427 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
David S. Millerec18d9a2012-07-12 00:25:15 -0700428
Jon Maxwell45caeaa2017-03-10 16:40:33 +1100429 if (dst)
430 dst->ops->redirect(dst, sk, skb);
431 }
Christoph Paasch50a75a82013-04-07 04:53:15 +0000432 goto out;
David S. Millerec18d9a2012-07-12 00:25:15 -0700433 }
434
Linus Torvalds1da177e2005-04-16 15:20:36 -0700435 if (type == ICMPV6_PKT_TOOBIG) {
Eric Dumazet0d4f0602013-03-18 07:01:28 +0000436 /* We are not interested in TCP_LISTEN and open_requests
437 * (SYN-ACKs send out by Linux are always <576bytes so
438 * they should go through unfragmented).
439 */
440 if (sk->sk_state == TCP_LISTEN)
441 goto out;
442
Hannes Frederic Sowa93b36cf2013-12-15 03:41:14 +0100443 if (!ip6_sk_accept_pmtu(sk))
444 goto out;
445
Eric Dumazet563d34d2012-07-23 09:48:52 +0200446 tp->mtu_info = ntohl(info);
447 if (!sock_owned_by_user(sk))
448 tcp_v6_mtu_reduced(sk);
Julian Anastasovd013ef2a2012-09-05 10:53:18 +0000449 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
Eric Dumazet7aa54702016-12-03 11:14:57 -0800450 &sk->sk_tsq_flags))
Julian Anastasovd013ef2a2012-09-05 10:53:18 +0000451 sock_hold(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700452 goto out;
453 }
454
Linus Torvalds1da177e2005-04-16 15:20:36 -0700455
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700456 /* Might be for an request_sock */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700457 switch (sk->sk_state) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 case TCP_SYN_SENT:
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700459 case TCP_SYN_RECV:
460 /* Only in fast or simultaneous open. If a fast open socket is
461 * is already accepted it is treated as a connected one below.
462 */
Ian Morris63159f22015-03-29 14:00:04 +0100463 if (fastopen && !fastopen->sk)
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700464 break;
465
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466 if (!sock_owned_by_user(sk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700467 sk->sk_err = err;
468 sk->sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
469
470 tcp_done(sk);
471 } else
472 sk->sk_err_soft = err;
473 goto out;
474 }
475
476 if (!sock_owned_by_user(sk) && np->recverr) {
477 sk->sk_err = err;
478 sk->sk_error_report(sk);
479 } else
480 sk->sk_err_soft = err;
481
482out:
483 bh_unlock_sock(sk);
484 sock_put(sk);
Stefano Brivio32bbd872018-11-08 12:19:21 +0100485 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486}
487
488
/* Build and transmit a SYN-ACK for a pending connection request.
 *
 * @dst may be a pre-resolved route; otherwise one is looked up from
 * the request.  Returns a net_xmit_eval()'d error, or -ENOMEM if the
 * SYN-ACK skb could not be built.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the client's flow label if requested. */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* np->opt is RCU-protected; request-specific options
		 * (ireq->ipv6_opt) take precedence when present.
		 */
		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass,
			       sk->sk_priority);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
530
Octavian Purdila72659ec2010-01-17 19:09:39 -0800531
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700532static void tcp_v6_reqsk_destructor(struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533{
Huw Davies56ac42b2016-06-27 15:05:28 -0400534 kfree(inet_rsk(req)->ipv6_opt);
Eric Dumazet634fb9792013-10-09 15:21:29 -0700535 kfree_skb(inet_rsk(req)->pktopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536}
537
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800538#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700539static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
David Aherndea53bb2019-12-30 14:14:28 -0800540 const struct in6_addr *addr,
541 int l3index)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800542{
David Aherndea53bb2019-12-30 14:14:28 -0800543 return tcp_md5_do_lookup(sk, l3index,
544 (union tcp_md5_addr *)addr, AF_INET6);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800545}
546
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700547static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700548 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800549{
David Aherndea53bb2019-12-30 14:14:28 -0800550 int l3index;
551
552 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
553 addr_sk->sk_bound_dev_if);
554 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
555 l3index);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800556}
557
/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler for IPv6 sockets.
 *
 * Copies the tcp_md5sig request from userspace, validates the
 * address family, optional prefix length and optional L3 (VRF)
 * ifindex, then adds or (when tcpm_keylen == 0) deletes the key.
 * V4-mapped addresses are stored as AF_INET keys.
 * Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 char __user *optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	int l3index = 0;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	/* Prefix matching is only available via TCP_MD5SIG_EXT; the
	 * prefix may not exceed the address width (32 for v4-mapped).
	 */
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		/* Default: exact-match (full-width) prefix. */
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	/* Zero key length means "delete this key". */
	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, l3index,
				      cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, l3index,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
624
/* Feed the IPv6 pseudo-header plus the TCP header (checksum zeroed)
 * into the MD5 hash request of @hp.  Both pieces are staged
 * contiguously in hp->scratch so a single scatterlist entry covers
 * them.  Returns the crypto_ahash_update() result (0 on success).
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* 2. TCP header, copied so the checksum field can be zeroed
	 * without touching the live packet.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
David S. Millerc7da57a2007-10-26 00:41:21 -0700650
/* Compute the TCP-MD5 signature over headers only (no payload) into
 * @md5_hash (16 bytes), for locally generated control segments.
 *
 * The hashed length is th->doff << 2, i.e. the TCP header including
 * options. Returns 0 on success; on any crypto/pool failure returns 1
 * with @md5_hash zeroed so callers never emit stale scratch data.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;	/* pool unavailable: nothing to put */
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	/* Pseudo-header + TCP header, then the key, then finalize. */
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
682
/* Compute the TCP-MD5 signature over a full segment (headers + payload)
 * into @md5_hash (16 bytes).
 *
 * @sk may be NULL: for established/request sockets the addresses come
 * from the socket, otherwise they are taken from the skb's IPv6 header
 * (e.g. when validating an incoming segment with no matching socket).
 *
 * Returns 0 on success; 1 with @md5_hash zeroed on any failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;	/* pool unavailable: nothing to put */
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	/* Pseudo-header length covers the whole segment (skb->len);
	 * skb data is hashed starting past the TCP header (doff << 2).
	 */
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
729
Eric Dumazetba8e2752015-10-02 11:43:28 -0700730#endif
731
/* Validate the TCP-MD5 option of an incoming segment against the key
 * configured on @sk (RFC 2385 semantics).
 *
 * Returns true if the segment must be dropped:
 *  - a key is configured but the segment carries no MD5 option,
 *  - the segment carries an MD5 option but no key is configured,
 *  - the signature does not verify.
 * Returns false when the segment is acceptable (including the
 * no-key/no-option case, and always when CONFIG_TCP_MD5SIG is off).
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	/* Key configured but peer sent no signature: drop and count. */
	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	/* Signature present but no key configured: drop and count. */
	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	/* genhash != 0 means the hash computation itself failed. */
	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest), l3index);
		return true;
	}
#endif
	return false;
}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800782
/* init_req callback of tcp_request_sock_ipv6_ops: fill the IPv6-specific
 * parts of a freshly allocated request sock from the incoming SYN @skb.
 *
 * Copies the peer/local addresses, pins the incoming interface for
 * link-local peers, and — when the listener asked for any per-packet
 * IPv6 control info — keeps a reference on the SYN skb in ireq->pktopts
 * so the options can be replayed to the child socket later.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Hold the SYN skb when the listener wants its IPv6 options
	 * (rxopt bits / flow label reflection) and this isn't a
	 * timewait-recycled ISN; the reference is dropped with pktopts.
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
808
Eric Dumazetf9646292015-09-29 07:42:50 -0700809static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
810 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -0400811 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +0300812{
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700813 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
Octavian Purdilad94e0412014-06-25 17:09:55 +0300814}
815
/* Generic request-sock operations for IPv6 TCP listeners; the TCP-level
 * counterparts live in tcp_request_sock_ipv6_ops below.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
825
/* TCP-specific request-sock callbacks for IPv6; consumed by the generic
 * tcp_conn_request() path (see tcp_v6_conn_request()).
 */
const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	/* Largest MSS we can advertise: minimum IPv6 MTU minus headers. */
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800842
/* Build and transmit a stand-alone control segment (RST or bare ACK) in
 * reply to @skb, using the per-netns IPv6 control socket.
 *
 * @sk may be NULL (e.g. RST for an unknown connection) — then the netns
 * is taken from the skb's dst. @rst selects RST vs ACK; @key, when set,
 * appends a TCP-MD5 signature option; @tsval/@tsecr, when tsecr != 0,
 * append a timestamp option. @tclass/@label/@priority shape the reply.
 * Addresses and ports are swapped relative to the received segment.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label, u32 priority)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;
	__u32 mark = 0;

	/* Grow the header for the optional TCP options we will emit. */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	/* Atomic context: this runs from softirq packet processing. */
	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* An ACK-less RST answers an ACK segment; everything else ACKs. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with src/dst swapped: we answer toward skb's source. */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local (strict) destinations must reply on the ingress
	 * interface unless the caller pinned one explicitly.
	 */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT) {
			mark = inet_twsk(sk)->tw_mark;
			/* autoflowlabel relies on buff->hash */
			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
				     PKT_HASH_TYPE_L4);
		} else {
			mark = sk->sk_mark;
		}
		buff->tstamp = tcp_transmit_time(sk);
	}
	/* Reflect the incoming mark if the sysctl asks for it. */
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow() even for a RST;
	 * the underlying function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass,
			 priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* Route lookup failed: drop the reply we built. */
	kfree_skb(buff);
}
960
/* Send a RST in reply to @skb. @sk may be NULL (no matching socket).
 *
 * Never resets a RST. When TCP-MD5 is configured, the RST must be signed
 * with the proper key — including the "active side lost" case, where the
 * key is found by looking up a listener for the peer; an unverifiable
 * signed segment generates no RST at all.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	int oif = 0;

	/* Never answer a RST with a RST. */
	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	/* RCU protects the md5 key and the listener lookup below. */
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
	} else if (hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * The active side is lost. Try to find a listening socket
		 * through the source port, and then find the md5 key through
		 * the listening socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash of the
		 * found key; no RST is generated if the hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net,
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	/* RFC 793: seq from the peer's ACK if present, else ACK the
	 * received segment's whole sequence span (SYN/FIN count as one).
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);

			trace_tcp_send_reset(sk, skb);
			if (np->repflow)
				label = ip6_flowlabel(ipv6h);
			priority = sk->sk_priority;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
		}
	} else {
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0,
			     label, priority);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
1067
/* Send a bare ACK in reply to @skb: a thin wrapper around
 * tcp_v6_send_response() with rst == 0. See that function for the
 * meaning of the remaining parameters.
 */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label, u32 priority)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label, priority);
}
1076
/* ACK a segment that hit a TIME_WAIT socket, replaying the connection's
 * final state (snd_nxt/rcv_nxt, scaled window, timestamp, md5 key,
 * tclass/flowlabel/priority) from the timewait sock, then drop the
 * reference inet_twsk() semantics give us.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);

	inet_twsk_put(tw);
}
1090
/* send_ack callback of tcp6_request_sock_ops: ACK on behalf of a
 * not-yet-accepted connection described by @req.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	int l3index;

	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
			0, 0, sk->sk_priority);
}
1115
1116
/* If SYN cookies are compiled in, try to turn a cookie-carrying ACK
 * into a freshly minted child socket; otherwise (or for SYN segments)
 * hand back @sk unchanged.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	/* SYN segments never carry a cookie to validate. */
	if (!tcp_hdr(skb)->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1127
/* Generate a SYN-cookie sequence number for an IPv6 SYN without creating
 * a request sock.
 *
 * On success stores the cookie in *@cookie, records queue overflow, and
 * returns the negotiated MSS; returns 0 when no cookie could be made
 * (always 0 with CONFIG_SYN_COOKIES disabled).
 */
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}
1142
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1144{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 if (skb->protocol == htons(ETH_P_IP))
1146 return tcp_v4_conn_request(sk, skb);
1147
1148 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001149 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001151 return tcp_conn_request(&tcp6_request_sock_ops,
1152 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
Linus Torvalds1da177e2005-04-16 15:20:36 -07001154drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001155 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 return 0; /* don't send reset */
1157}
1158
/* Move the IPv6 control-block data saved in TCP_SKB_CB back to the
 * front of skb->cb, undoing tcp_v6_fill_cb().
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1168
/* Create the child socket once a connection request completes (third ACK,
 * or syncookie/fastopen path), @sk being the listener.
 *
 * Two cases are handled:
 *  - v4-mapped: the SYN arrived as an IPv4 packet (ETH_P_IP) on an IPv6
 *    listener; delegate child creation to tcp_v4_syn_recv_sock() and then
 *    repoint the child's af_ops / backlog_rcv (and MD5 af_specific) at the
 *    IPv4 variants.
 *  - native IPv6: allocate the child with tcp_create_openreq_child(), copy
 *    addresses and IPv6 options from the request/listener, inherit the MD5
 *    key if one matches, and hash the child into the established table.
 *
 * Returns the new socket, or NULL on failure (accept queue full, no route,
 * allocation failure, or __inet_inherit_port() failure).  *own_req tells
 * the caller whether we won the race to insert the child for this request.
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

		newinet = inet_sk(newsk);
		newnp = tcp_inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		/* Start from the listener's ipv6_pinfo, then undo anything
		 * that must not be shared with the child (lists, options,
		 * pktoptions), and switch the ops to the IPv4 handlers.
		 */
		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	/* Native IPv6 path. */
	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = tcp_inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses come from the request sock, not from the listener. */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		/* ipv6_dup_options() gives the child its own copy, charged
		 * to newsk's option memory.
		 */
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, l3index, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1378
/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Note on opt_skb: when any rxopt bit is set, a clone of the incoming
 * skb is kept so IPV6_PKTOPTIONS can be latched at ipv6_pktoptions:
 * below; every exit path either frees it or hands it to np->pktoptions.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
	struct tcp_sock *tp;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		/* Drop the cached rx dst if the packet came in on another
		 * interface or the cached route failed validation.
		 */
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		/* nsk != sk: a syncookie produced a child socket;
		 * feed the packet to it.
		 */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			/* Latch this skb as the new pktoptions; the xchg
			 * returns the previous one so it can be freed below.
			 */
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	/* Frees the previously latched skb (or the unused clone); may be NULL. */
	kfree_skb(opt_skb);
	return 0;
}
1514
/* Populate TCP_SKB_CB() for an incoming segment from its IPv6 and TCP
 * headers: sequence numbers, flags, DS field, timestamp presence.
 * Must only be called after xfrm6_policy_check() — see comment below.
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq counts SYN and FIN as one sequence unit each. */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
1538
/* IPv6 TCP receive entry point.
 *
 * Validates the TCP header and checksum, looks up the owning socket and
 * dispatches on its state: TCP_NEW_SYN_RECV and TCP_TIME_WAIT are handled
 * inline here; listeners go straight to tcp_v6_do_rcv(); other sockets are
 * processed under the socket lock or pushed onto the backlog if the owner
 * holds the lock.  Returns 0, or -1 when tcp_v6_do_rcv() reported failure.
 */
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
{
	struct sk_buff *skb_to_free;
	int sdif = inet6_sdif(skb);
	int dif = inet6_iif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;	/* did the lookup take a reference on sk? */
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() may have reallocated the header; re-read pointers. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		/* Listener went away (e.g. closed); drop the req and retry
		 * the lookup so the packet can match another socket.
		 */
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			/* tcp_filter() may have trimmed/reallocated the skb. */
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/* IP_MINTTL-style protection: drop packets whose hop limit is below
	 * the socket's configured minimum.
	 */
	if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v6_do_rcv(sk, skb);
	} else {
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	/* Free the displaced rx cache skb outside the socket lock. */
	if (skb_to_free)
		__kfree_skb(skb_to_free);
put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN hit a TIME_WAIT socket: look for a live
		 * listener, and if found, kill the twsk and process the
		 * SYN against the listener.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		/* fall through */
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1758
/* Early demux: before routing, try to match the packet to an established
 * socket and attach that socket (and its cached, validated rx dst) to the
 * skb, so the normal lookup/route work can be skipped later.
 * Best-effort: returns silently if the packet doesn't qualify.
 */
INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	if (skb->pkt_type != PACKET_HOST)
		return;

	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			/* Lockless read: another path may clear sk_rx_dst. */
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1796
/* TIME_WAIT socket ops for TCP over IPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1802
/* Connection-level af_ops used by native IPv6 TCP sockets. */
const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1822
#ifdef CONFIG_TCP_MD5SIG
/* MD5 signature ops for native IPv6 TCP sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	= tcp_v6_md5_lookup,
	.calc_md5_hash	= tcp_v6_md5_hash_skb,
	.md5_parse	= tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001830
/*
 * TCP over IPv4 via INET6 API
 */
/* af_ops installed on an IPv6 socket carrying a v4-mapped connection:
 * IPv4 transmit/header handling, IPv6 sockopt/sockaddr handling.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1852
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 hooks for the v4-mapped case: hashing uses the IPv4
 * pseudo-header helpers, but option parsing stays the v6 one because
 * the socket-level API is still AF_INET6.
 */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001860
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861/* NOTE: A lot of things set to zero explicitly by call to
1862 * sk_alloc() so need not be done here.
1863 */
1864static int tcp_v6_init_sock(struct sock *sk)
1865{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001866 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867
Neal Cardwell900f65d2012-04-19 09:55:21 +00001868 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001870 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001871
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001872#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001873 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001874#endif
1875
Linus Torvalds1da177e2005-04-16 15:20:36 -07001876 return 0;
1877}
1878
/* .destroy hook of tcpv6_prot: run the address-family independent TCP
 * teardown (named tcp_v4_destroy_sock but shared by both families),
 * then release the IPv6-specific socket state.
 */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1884
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001885#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */
/* Emit one /proc/net/tcp6 line for a SYN_RECV request socket.
 * @i is the bucket/slot index printed in the first column.
 * Several columns are fixed for request sockets: state is TCP_SYN_RECV,
 * exactly one timer (the SYN-ACK expire timer) is reported, and there is
 * no inode.
 */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	/* Remaining lifetime of the SYN-ACK retransmit timer; clamp a
	 * just-expired timer to 0 rather than printing a negative delta.
	 */
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)), /* uid of the listener */
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1918
/* Emit one /proc/net/tcp6 line for a full socket (any state except
 * TIME_WAIT/NEW_SYN_RECV, which have their own printers below).
 * Runs without the socket lock, hence the READ_ONCE() annotations and
 * the tolerance for transiently inconsistent values.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* "tr" column: 1 = retransmit/RACK-reo/loss-probe timer pending,
	 * 4 = zero-window probe timer, 2 = sk_timer (keepalive) pending,
	 * 0 = no timer armed (then tm->when prints as 0).
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	/* rx_queue: accept backlog for listeners, unread bytes otherwise. */
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sp->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   READ_ONCE(tp->write_seq) - tp->snd_una, /* tx_queue */
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
		   tp->snd_cwnd,
		   /* last column: fastopen queue limit for listeners,
		    * else ssthresh (-1 while still in initial slow start).
		    */
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
1990
/* Emit one /proc/net/tcp6 line for a TIME_WAIT socket.  Most columns
 * are fixed zeros: a tw sock has no queues, uid or inode; timer type
 * is always 3 with the remaining tw lifetime as its expiry.
 */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}
2015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016static int tcp6_seq_show(struct seq_file *seq, void *v)
2017{
2018 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002019 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002020
2021 if (v == SEQ_START_TOKEN) {
2022 seq_puts(seq,
2023 " sl "
2024 "local_address "
2025 "remote_address "
2026 "st tx_queue rx_queue tr tm->when retrnsmt"
2027 " uid timeout inode\n");
2028 goto out;
2029 }
2030 st = seq->private;
2031
Eric Dumazet079096f2015-10-02 11:43:32 -07002032 if (sk->sk_state == TCP_TIME_WAIT)
2033 get_timewait6_sock(seq, v, st->num);
2034 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07002035 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07002036 else
2037 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038out:
2039 return 0;
2040}
2041
/* seq_file ops for /proc/net/tcp6: iteration (start/next/stop) is the
 * generic TCP hash-table walker shared with IPv4; only ->show is v6.
 */
static const struct seq_operations tcp6_seq_ops = {
	.show		= tcp6_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};
2048
/* Address-family selector handed to the generic walker; stored as the
 * proc entry's private data by tcp6_proc_init() below.
 */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.family		= AF_INET6,
};
2052
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002053int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054{
Christoph Hellwigc3506372018-04-10 19:42:55 +02002055 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2056 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002057 return -ENOMEM;
2058 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059}
2060
/* Per-netns teardown of /proc/net/tcp6 (pairs with tcp6_proc_init()). */
void tcp6_proc_exit(struct net *net)
{
	remove_proc_entry("tcp6", net->proc_net);
}
2065#endif
2066
/* Socket-level proto for AF_INET6/SOCK_STREAM: mostly the shared tcp_*
 * implementations, with v6-specific connect/init/destroy/backlog/hash
 * hooks.  Shares the hash tables (tcp_hashinfo), memory accounting and
 * the ipv4.* sysctl wmem/rmem limits with the IPv4 side.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
2113
/* IPPROTO_TCP receive/error entry points registered with the IPv6
 * stack by tcpv6_init().
 *
 * thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =  tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2124
/* socket(PF_INET6, SOCK_STREAM, IPPROTO_TCP) mapping: pairs tcpv6_prot
 * with the inet6 stream socket ops.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2133
/* Per-netns init: create the kernel TCP control socket stored in
 * net->ipv6.tcp_sk.  Returns 0 or a negative errno.
 */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
2139
/* Per-netns exit: release the control socket created in tcpv6_net_init(). */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2144
/* Batched netns exit: purge all IPv6 timewait sockets from the shared
 * hash table in one pass for the whole batch of dying namespaces.
 */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
2149
/* Per-network-namespace lifecycle hooks, registered by tcpv6_init(). */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2155
/* Boot-time registration of TCP/IPv6, in dependency order: the
 * IPPROTO_TCP handler, then the socket protosw, then the per-netns
 * subsystem, then MPTCP's IPv6 side.  Each failure unwinds exactly
 * the steps that already succeeded, via the goto chain below.
 * Returns 0 or the first negative errno encountered.
 */
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;

	ret = mptcpv6_init();
	if (ret)
		goto out_tcpv6_pernet_subsys;

out:
	return ret;

out_tcpv6_pernet_subsys:
	unregister_pernet_subsys(&tcpv6_net_ops);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
2188
/* Undo tcpv6_init() in reverse registration order.  (No MPTCP teardown
 * call is made here.)
 */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}