blob: 4435fa342e7aa756bd426ffe051ade7d84ac5523 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09009 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 */
21
Herbert Xueb4dea52008-12-29 23:04:08 -080022#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/jiffies.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/init.h>
34#include <linux/jhash.h>
35#include <linux/ipsec.h>
36#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080038#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
Paolo Abeni0e219ae2019-05-03 17:01:37 +020042#include <linux/indirect_call_wrapper.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/tcp.h>
45#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030046#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080047#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <net/ipv6.h>
49#include <net/transp_v6.h>
50#include <net/addrconf.h>
51#include <net/ip6_route.h>
52#include <net/ip6_checksum.h>
53#include <net/inet_ecn.h>
54#include <net/protocol.h>
55#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/snmp.h>
57#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080058#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070059#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070060#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030061#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063#include <linux/proc_fs.h>
64#include <linux/seq_file.h>
65
Herbert Xucf80e0e2016-01-24 21:20:23 +080066#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080067#include <linux/scatterlist.h>
68
Song Liuc24b14c2017-10-23 09:20:24 -070069#include <trace/events/tcp.h>
70
Eric Dumazeta00e7442015-09-29 07:42:39 -070071static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070073 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
Stephen Hemminger3b401a82009-09-01 19:25:04 +000077static const struct inet_connection_sock_af_ops ipv6_mapped;
Mat Martineau35b2c322020-01-09 07:59:21 -080078const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080079#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +000080static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090082#else
/* CONFIG_TCP_MD5SIG=n stub: MD5 key lookups always miss, so the
 * signature paths compile away.  @addr/@l3index are ignored.
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return NULL;
}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
/* Helper returning the inet6 address from a given tcp socket.
 * It can be used in TCP stack instead of inet6_sk(sk).
 * This avoids a dereference and allow compiler optimizations.
 * It is a specialized version of inet6_sk_generic().
 */
static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
{
	/* Assumes struct ipv6_pinfo is laid out at the tail of
	 * struct tcp6_sock — confirm against the tcp6_sock definition
	 * if that layout ever changes.
	 */
	unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);

	return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
102
/* Cache @skb's input route on the socket (sk->sk_rx_dst), together with
 * the incoming interface index and the route's cookie so the cached dst
 * can be revalidated later.  The dst is only cached if a reference can
 * be taken safely (dst_hold_safe()); otherwise the socket is left as is.
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
115
/* Compute the initial TCP sequence number for the flow identified by
 * @skb's IPv6 address / TCP port 4-tuple (see secure_tcpv6_seq()).
 * Note the argument order: the packet's daddr/dest come first because
 * the ISN is generated from the local endpoint's point of view.
 */
static u32 tcp_v6_init_seq(const struct sk_buff *skb)
{
	return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
				ipv6_hdr(skb)->saddr.s6_addr32,
				tcp_hdr(skb)->dest,
				tcp_hdr(skb)->source);
}
123
/* Compute the per-flow TCP timestamp offset for @skb's IPv6 address
 * pair (see secure_tcpv6_ts_off()).
 */
static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
{
	return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
				   ipv6_hdr(skb)->saddr.s6_addr32);
}
129
/* connect(2) pre-hook: run the cgroup BPF INET6_CONNECT program on
 * @uaddr before tcp_v6_connect() proper.  Must hold the socket lock
 * (asserted via sock_owned_by_me()).
 * Returns 0 or a negative errno from the BPF program / length check.
 */
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}
144
/* Establish an outgoing TCPv6 connection (connect(2) handler).
 *
 * Validates the destination sockaddr, resolves BSD'isms (connect to
 * in6addr_any means loopback), and special-cases v4-mapped destinations
 * by switching the socket over to the mapped (IPv4) operations and
 * delegating to tcp_v4_connect().  For native IPv6 destinations it
 * builds a flowi6, routes it, selects a source address, stores the dst
 * on the socket, allocates a local port via inet6_hash_connect(), and
 * finally sends the SYN via tcp_connect().
 *
 * Returns 0 on success or a negative errno; on failure the socket is
 * returned to TCP_CLOSE with inet_dport and sk_route_caps cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* If the user supplied a flow label, validate it against the
	 * flow label manager before using it.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reconnecting to a different peer: old timestamp state and
	 * write_seq no longer apply.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Flip the socket to the IPv4 (mapped) operations before
		 * delegating; everything is restored below if
		 * tcp_v4_connect() fails.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No source address bound yet: adopt the one the route chose. */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	/* Pick the ISN and timestamp offset, unless the user is doing a
	 * repair-mode restore (in which case both were restored already).
	 */
	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	/* TCP Fastopen may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
347
Eric Dumazet563d34d2012-07-23 09:48:52 +0200348static void tcp_v6_mtu_reduced(struct sock *sk)
349{
350 struct dst_entry *dst;
351
352 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
353 return;
354
355 dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
356 if (!dst)
357 return;
358
359 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
360 tcp_sync_mss(sk, dst_mtu(dst));
361 tcp_simple_retransmit(sk);
362 }
363}
364
/* ICMPv6 error handler for TCP.
 *
 * @skb points at the ICMPv6 message; @offset locates the embedded TCP
 * header of the original (offending) segment inside it.  Looks up the
 * established/timewait/request socket for that segment and reacts per
 * @type/@code: NDISC_REDIRECT updates the route, ICMPV6_PKT_TOOBIG
 * triggers PMTU reduction (possibly deferred if the socket is owned by
 * user), DEST_UNREACH/NOROUTE may revert an RTO backoff (RFC 6069),
 * and fatal errors on connecting sockets abort the connection.
 *
 * Returns 0 when a socket was found and handled, -ENOENT otherwise.
 */
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	/* PKT_TOOBIG can still be handled (deferred) while the socket is
	 * owned by user; everything else is soft-dropped below.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Honour the socket's minimum hop count (IPV6_MINHOPCOUNT). */
	if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* Quoted sequence must fall inside the unacked window, else the
	 * ICMP is stale or forged.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		/* NOTE(review): plain store, but tcp_v6_mtu_reduced() may
		 * read mtu_info from process context when deferred —
		 * presumably this should be WRITE_ONCE paired with a
		 * READ_ONCE on the reader side; confirm.
		 */
		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
498
499
/* Build and transmit a SYN-ACK for @req.
 *
 * Routes the reply when no @dst was supplied, builds the SYN-ACK skb
 * via tcp_make_synack(), fills in the TCP checksum, selects the
 * traffic class (optionally reflecting the SYN's TOS bits when
 * sysctl_tcp_reflect_tos is set, preserving our own ECN bits) and
 * transmits through ip6_xmit() with np->opt read under RCU.
 *
 * Returns the net_xmit_eval()'ed transmit result, or -ENOMEM when the
 * route or the skb could not be obtained.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;
	u8 tclass;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* Reflect the peer's TOS (minus ECN bits) if requested,
		 * otherwise use the listener's own tclass.
		 */
		tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(np->tclass & INET_ECN_MASK) :
				np->tclass;

		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		rcu_read_lock();
		/* Per-request options (if any) override the socket's. */
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
			       tclass, sk->sk_priority);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
552
Octavian Purdila72659ec2010-01-17 19:09:39 -0800553
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700554static void tcp_v6_reqsk_destructor(struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555{
Huw Davies56ac42b2016-06-27 15:05:28 -0400556 kfree(inet_rsk(req)->ipv6_opt);
Eric Dumazet634fb9792013-10-09 15:21:29 -0700557 kfree_skb(inet_rsk(req)->pktopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558}
559
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800560#ifdef CONFIG_TCP_MD5SIG
/* Look up the TCP-MD5 key for IPv6 peer @addr, scoped by @l3index
 * (0 means no L3 domain scoping).  Thin AF_INET6 wrapper around
 * tcp_md5_do_lookup().
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return tcp_md5_do_lookup(sk, l3index,
				 (union tcp_md5_addr *)addr, AF_INET6);
}
568
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700569static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700570 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800571{
David Aherndea53bb2019-12-30 14:14:28 -0800572 int l3index;
573
574 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
575 addr_sk->sk_bound_dev_if);
576 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
577 l3index);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800578}
579
/* setsockopt() handler for TCP_MD5SIG / TCP_MD5SIG_EXT on IPv6 sockets:
 * add or delete a TCP-MD5 key for a peer address.
 *
 * @optname: TCP_MD5SIG, or TCP_MD5SIG_EXT which additionally honours
 *	     tcpm_prefixlen and tcpm_ifindex via tcpm_flags.
 * @optval/@optlen: user-supplied struct tcp_md5sig.
 *
 * A zero tcpm_keylen deletes the key.  v4-mapped peers are stored as
 * AF_INET keys using the embedded IPv4 address (and a <=32 prefix).
 * Returns 0 or a negative errno.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	int l3index = 0;
	u8 prefixlen;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	/* Prefix length: explicit for _EXT with FLAG_PREFIX, otherwise a
	 * full host match (32 bits for v4-mapped, 128 otherwise).
	 */
	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	/* Empty key means delete. */
	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, l3index,
				      cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, l3index,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
646
/* Fold the TCP-MD5 pseudo-header material into an in-progress MD5
 * computation: the IPv6 pseudo-header (RFC 2460 style: saddr, daddr,
 * upper-layer length, next header) followed by the TCP header with its
 * checksum field zeroed.  @hp must have been obtained from
 * tcp_get_md5sig_pool(); its scratch area is used as the staging buffer.
 * Returns the crypto_ahash_update() result (0 on success).
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* 2. TCP header, laid out right after the pseudo-header in scratch;
	 * the checksum must be hashed as zero per RFC 2385.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	/* Hash pseudo-header + TCP header in one scatterlist entry */
	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
David S. Millerc7da57a2007-10-26 00:41:21 -0700672
/* Compute the TCP-MD5 signature over the pseudo-header, the TCP header
 * (including options, th->doff << 2 bytes) and @key, writing the 16-byte
 * digest to @md5_hash.  Used when signing locally built control segments
 * (see tcp_v6_send_response()).
 * Returns 0 on success; on any crypto failure returns 1 with @md5_hash
 * zeroed.  Uses the per-CPU md5 pool, so callers run in contexts where
 * tcp_get_md5sig_pool()/tcp_put_md5sig_pool() pairing is legal.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	/* Never leave stale bytes in the caller's digest buffer */
	memset(md5_hash, 0, 16);
	return 1;
}
704
/* Compute the TCP-MD5 signature for a full segment: pseudo-header,
 * TCP header, and skb payload (tcp_md5_hash_skb_data() skips the
 * th->doff << 2 header bytes), then the key; digest goes to @md5_hash.
 *
 * Addresses come from @sk when non-NULL (valid for established and
 * request sockets: sk_v6_rcv_saddr/sk_v6_daddr), otherwise from the
 * skb's IPv6 header — the latter is used when verifying inbound
 * segments without an established socket.
 * Returns 0 on success; 1 (with @md5_hash zeroed) on crypto failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	/* Zero the digest so a failed computation can never match */
	memset(md5_hash, 0, 16);
	return 1;
}
751
Eric Dumazetba8e2752015-10-02 11:43:28 -0700752#endif
753
/* Validate the TCP-MD5 option on an inbound segment against the key
 * configured on @sk (if any).  Returns true when the packet must be
 * DROPPED: key expected but option absent, option present but no key,
 * or digest mismatch.  Returns false when the packet may proceed
 * (no MD5 in play, or signature verified).  Without CONFIG_TCP_MD5SIG
 * this compiles to "never drop".
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	/* Key configured but peer sent no signature: drop and count */
	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	/* Signature present but no key configured: drop and count */
	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest), l3index);
		return true;
	}
#endif
	return false;
}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800804
/* Fill the IPv6-specific fields of a freshly minted request sock from
 * the incoming SYN: remote/local addresses, the inbound interface for
 * link-local peers, and a reference to the skb when the listener wants
 * received IPv6 options/ancillary data replayed to the child socket.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Stash the SYN skb (taking a reference) if any rx option the
	 * listener subscribed to must be delivered to the new socket.
	 * Skipped for TIME_WAIT-recycled ISNs (tcp_tw_isn set).
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
830
Eric Dumazetf9646292015-09-29 07:42:50 -0700831static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
Florian Westphal7ea851d2020-11-30 16:36:30 +0100832 struct sk_buff *skb,
Eric Dumazetf9646292015-09-29 07:42:50 -0700833 struct flowi *fl,
Florian Westphal7ea851d2020-11-30 16:36:30 +0100834 struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +0300835{
Florian Westphal7ea851d2020-11-30 16:36:30 +0100836 tcp_v6_init_req(req, sk, skb);
837
838 if (security_inet_conn_request(sk, skb, req))
839 return NULL;
840
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700841 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
Octavian Purdilad94e0412014-06-25 17:09:55 +0300842}
843
/* Generic request_sock operations for IPv6 TCP listeners: how to
 * retransmit SYN-ACKs, ack/reset peers, and tear down request socks.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family = AF_INET6,
	.obj_size = sizeof(struct tcp6_request_sock),
	.rtx_syn_ack = tcp_rtx_synack,
	.send_ack = tcp_v6_reqsk_send_ack,
	.destructor = tcp_v6_reqsk_destructor,
	.send_reset = tcp_v6_send_reset,
	.syn_ack_timeout = tcp_syn_ack_timeout,
};
853
/* TCP-specific af_ops for IPv6 request socks: MSS clamping, MD5 key
 * handling, syncookie sequence generation, routing, ISN/timestamp
 * initialization and SYN-ACK transmission.
 */
const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	/* Largest MSS that fits a minimum-MTU IPv6 path */
	.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
		sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup = tcp_v6_md5_lookup,
	.calc_md5_hash = tcp_v6_md5_hash_skb,
#endif
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq = cookie_v6_init_sequence,
#endif
	.route_req = tcp_v6_route_req,
	.init_seq = tcp_v6_init_seq,
	.init_ts_off = tcp_v6_init_ts_off,
	.send_synack = tcp_v6_send_synack,
};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800869
/* Build and transmit a stateless TCP control segment (RST when @rst,
 * otherwise a bare ACK) in reply to @skb, via the per-netns control
 * socket.  Optional pieces sized into the header: timestamp option
 * (when @tsecr), MD5 signature option (when @key), and an MPTCP reset
 * option (RST without MD5 only).  @sk may be NULL (no matching socket),
 * a full socket, or a timewait socket; it only influences mark,
 * tstamp, uid and flow hash.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label, u32 priority)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 mrst = 0, *topt;
	struct dst_entry *dst;
	__u32 mark = 0;

	/* Account for each optional TCP option in the header length */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

#ifdef CONFIG_MPTCP
	if (rst && !key) {
		mrst = mptcp_reset_option(skb);

		if (mrst)
			tot_len += sizeof(__be32);
	}
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST answering a non-ACK segment carries no ACK bit itself */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Options follow the fixed header, written as 32-bit words */
	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

	if (mrst)
		*topt++ = mrst;

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with reply direction: our saddr is skb's daddr */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	/* Reply flow: reverse the incoming addresses */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local destinations require an explicit output interface */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT) {
			mark = inet_twsk(sk)->tw_mark;
			/* autoflowlabel relies on buff->hash */
			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
				     PKT_HASH_TYPE_L4);
		} else {
			mark = sk->sk_mark;
		}
		buff->tstamp = tcp_transmit_time(sk);
	}
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		/* ECN bits must not leak into a control segment's tclass */
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* Route lookup failed: drop the reply silently */
	kfree_skb(buff);
}
999
/* Send a RST in reply to @skb.  Never resets a RST (RFC 793).  When the
 * segment carried an MD5 option but no established socket matched, a
 * listener lookup recovers the key so the RST itself can be signed —
 * and no RST is sent at all if the inbound signature does not verify.
 * Sequence numbers follow RFC 793: echo the peer's ACK as our SEQ when
 * present, otherwise ACK everything the peer sent.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	/* RCU protects the md5 key (and sk1) until the response is built */
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
	} else if (hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net,
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	/* Derive oif, flow label and priority from the socket when we
	 * have one; otherwise optionally reflect the peer's flow label.
	 */
	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);

			trace_tcp_send_reset(sk, skb);
			if (np->repflow)
				label = ip6_flowlabel(ipv6h);
			priority = sk->sk_priority;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
		}
	} else {
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
			     ipv6_get_dsfield(ipv6h), label, priority);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
1106
/* Send a bare (non-RST) ACK reply to @skb — thin wrapper around
 * tcp_v6_send_response() with rst = 0.  Used for timewait and
 * request-sock acknowledgements below.
 */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label, u32 priority)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label, priority);
}
1115
/* ACK a segment that hit a TIME_WAIT socket, reconstructing sequence,
 * window (scaled down per tw_rcv_wscale), timestamp and MD5 key from
 * the timewait state, then drop the tw reference taken by the caller.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);

	inet_twsk_put(tw);
}
1129
/* ACK on behalf of a request sock (SYN_RECV / Fast Open), e.g. to
 * answer a retransmitted SYN or out-of-window data while the 3WHS is
 * still in flight.  The MD5 key is looked up against the peer address
 * scoped by the ingress L3 domain.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	int l3index;

	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
}
1154
1155
/* Syncookie path: a non-SYN segment arriving at a listener may be the
 * final ACK of a cookie handshake; let cookie_v6_check() either mint a
 * child socket or keep/replace @sk.  With syncookies compiled out this
 * is the identity function.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		return cookie_v6_check(sk, skb);
#endif
	return sk;
}
1166
Petar Penkov9349d602019-07-29 09:59:14 -07001167u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1168 struct tcphdr *th, u32 *cookie)
1169{
1170 u16 mss = 0;
1171#ifdef CONFIG_SYN_COOKIES
1172 mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1173 &tcp_request_sock_ipv6_ops, sk, th);
1174 if (mss) {
1175 *cookie = __cookie_v6_init_sequence(iph, th, &mss);
1176 tcp_synq_overflow(sk);
1177 }
1178#endif
1179 return mss;
1180}
1181
/* Entry point for an incoming connection request (SYN) on an IPv6
 * socket.  v4-mapped traffic on a dual-stack socket is handed to the
 * IPv4 path; non-unicast destinations and v4-mapped *source* addresses
 * are dropped without a reset.  Returns 0 in every case (never resets).
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Dual-stack socket received an IPv4 SYN */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_conn_request(sk, skb);

	if (!ipv6_unicast_destination(skb))
		goto drop;

	/* An IPv6 packet must not carry a v4-mapped source address */
	if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
		__IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
		return 0;
	}

	return tcp_conn_request(&tcp6_request_sock_ops,
				&tcp_request_sock_ipv6_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0; /* don't send reset */
}
1202
/* Restore IP6CB(skb) from the copy TCP stashed in TCP_SKB_CB. */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1212
Eric Dumazet0c271712015-09-29 07:42:48 -07001213static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001214 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001215 struct dst_entry *dst,
1216 struct request_sock *req_unhash,
1217 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001219 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001220 struct ipv6_pinfo *newnp;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001221 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001222 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223 struct inet_sock *newinet;
Ricardo Dias01770a12020-11-20 11:11:33 +00001224 bool found_dup_sk = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001225 struct tcp_sock *newtp;
1226 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001227#ifdef CONFIG_TCP_MD5SIG
1228 struct tcp_md5sig_key *key;
David Aherndea53bb2019-12-30 14:14:28 -08001229 int l3index;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001230#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001231 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232
1233 if (skb->protocol == htons(ETH_P_IP)) {
1234 /*
1235 * v6 mapped
1236 */
1237
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001238 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1239 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
Ian Morris63159f22015-03-29 14:00:04 +01001241 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 return NULL;
1243
Eric Dumazet93a77c12019-03-19 07:01:08 -07001244 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245
1246 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001247 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 newtp = tcp_sk(newsk);
1249
1250 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1251
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001252 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001254 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Peter Krystadcec37a62020-01-21 16:56:18 -08001255 if (sk_is_mptcp(newsk))
Geert Uytterhoeven31484d52020-01-30 10:45:26 +01001256 mptcpv6_handle_mapped(newsk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001258#ifdef CONFIG_TCP_MD5SIG
1259 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1260#endif
1261
WANG Cong83eadda2017-05-09 16:59:54 -07001262 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001263 newnp->ipv6_ac_list = NULL;
1264 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 newnp->pktoptions = NULL;
1266 newnp->opt = NULL;
Eric Dumazet89e41302019-03-19 05:45:35 -07001267 newnp->mcast_oif = inet_iif(skb);
1268 newnp->mcast_hops = ip_hdr(skb)->ttl;
1269 newnp->rcv_flowinfo = 0;
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001270 if (np->repflow)
Eric Dumazet89e41302019-03-19 05:45:35 -07001271 newnp->flow_label = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001273 /*
1274 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1275 * here, tcp_create_openreq_child now does this for us, see the comment in
1276 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278
1279 /* It is tricky place. Until this moment IPv4 tcp
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001280 worked with IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 Sync it now.
1282 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001283 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
1285 return newsk;
1286 }
1287
Eric Dumazet634fb9792013-10-09 15:21:29 -07001288 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001289
1290 if (sk_acceptq_is_full(sk))
1291 goto out_overflow;
1292
David S. Miller493f3772010-12-02 12:14:29 -08001293 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001294 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001295 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001297 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298
1299 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001300 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001301 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001303 /*
1304 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1305 * count here, tcp_create_openreq_child now does this for us, see the
1306 * comment in that function for the gory details. -acme
1307 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
Stephen Hemminger59eed272006-08-25 15:55:43 -07001309 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001310 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001311 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Eric Dumazet93a77c12019-03-19 07:01:08 -07001313 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314
1315 newtp = tcp_sk(newsk);
1316 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001317 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
1319 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1320
Eric Dumazet634fb9792013-10-09 15:21:29 -07001321 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1322 newnp->saddr = ireq->ir_v6_loc_addr;
1323 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1324 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001326 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
1328 First: no IPv4 options.
1329 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001330 newinet->inet_opt = NULL;
WANG Cong83eadda2017-05-09 16:59:54 -07001331 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001332 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001333 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334
1335 /* Clone RX bits */
1336 newnp->rxopt.all = np->rxopt.all;
1337
Linus Torvalds1da177e2005-04-16 15:20:36 -07001338 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001340 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001341 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001342 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001343 if (np->repflow)
1344 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345
Wei Wang8ef44b62020-12-08 09:55:08 -08001346 /* Set ToS of the new socket based upon the value of incoming SYN.
1347 * ECT bits are set later in tcp_init_transfer().
1348 */
Wei Wangac8f1712020-09-09 17:50:48 -07001349 if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
1350 newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1351
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 /* Clone native IPv6 options from listening socket (if any)
1353
1354 Yes, keeping reference count would be much more clever,
1355 but we make one more one thing there: reattach optmem
1356 to newsk.
1357 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001358 opt = ireq->ipv6_opt;
1359 if (!opt)
1360 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001361 if (opt) {
1362 opt = ipv6_dup_options(newsk, opt);
1363 RCU_INIT_POINTER(newnp->opt, opt);
1364 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001365 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001366 if (opt)
1367 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1368 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369
Daniel Borkmann81164412015-01-05 23:57:48 +01001370 tcp_ca_openreq_child(newsk, dst);
1371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001373 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001374
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375 tcp_initialize_rcv_mss(newsk);
1376
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001377 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1378 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001379
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001380#ifdef CONFIG_TCP_MD5SIG
David Aherndea53bb2019-12-30 14:14:28 -08001381 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1382
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001383 /* Copy over the MD5 key from the original socket */
David Aherndea53bb2019-12-30 14:14:28 -08001384 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
Ian Morris53b24b82015-03-29 14:00:05 +01001385 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001386 /* We're using one, so create a matching key
1387 * on the newsk structure. If we fail to get
1388 * memory, then we end up not copying the key
1389 * across. Shucks.
1390 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001391 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
David Aherndea53bb2019-12-30 14:14:28 -08001392 AF_INET6, 128, l3index, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001393 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001394 }
1395#endif
1396
Balazs Scheidler093d2822010-10-21 13:06:43 +02001397 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001398 inet_csk_prepare_forced_close(newsk);
1399 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001400 goto out;
1401 }
Ricardo Dias01770a12020-11-20 11:11:33 +00001402 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1403 &found_dup_sk);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001404 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001405 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001406
1407 /* Clone pktoptions received with SYN, if we own the req */
1408 if (ireq->pktopts) {
1409 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001410 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001411 consume_skb(ireq->pktopts);
1412 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001413 if (newnp->pktoptions) {
1414 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001415 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001416 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001417 }
Ricardo Dias01770a12020-11-20 11:11:33 +00001418 } else {
1419 if (!req_unhash && found_dup_sk) {
1420 /* This code path should only be executed in the
1421 * syncookie case only
1422 */
1423 bh_unlock_sock(newsk);
1424 sock_put(newsk);
1425 newsk = NULL;
1426 }
Eric Dumazetce105002015-10-30 09:46:12 -07001427 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428
1429 return newsk;
1430
1431out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001432 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001433out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001435out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001436 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 return NULL;
1438}
1439
Brian Vazquezbbd807d2021-02-01 17:41:32 +00001440INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1441 u32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442/* The socket must have it's spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001443 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 *
1445 * We have a potential double-lock case here, so even when
1446 * doing backlog processing we use the BH locking scheme.
1447 * This is because we cannot sleep with the original spinlock
1448 * held.
1449 */
1450static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
1451{
Eric Dumazet93a77c12019-03-19 07:01:08 -07001452 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 struct sk_buff *opt_skb = NULL;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001454 struct tcp_sock *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455
1456 /* Imagine: socket is IPv6. IPv4 packet arrives,
1457 goes to IPv4 receive handler and backlogged.
1458 From backlog it always goes here. Kerboom...
1459 Fortunately, tcp_rcv_established and rcv_established
1460 handle them correctly, but it is not case with
1461 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1462 */
1463
1464 if (skb->protocol == htons(ETH_P_IP))
1465 return tcp_v4_do_rcv(sk, skb);
1466
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 /*
1468 * socket locking is here for SMP purposes as backlog rcv
1469 * is currently called with bh processing disabled.
1470 */
1471
1472 /* Do Stevens' IPV6_PKTOPTIONS.
1473
1474 Yes, guys, it is the only place in our code, where we
1475 may make it not affecting IPv4.
1476 The rest of code is protocol independent,
1477 and I do not like idea to uglify IPv4.
1478
1479 Actually, all the idea behind IPV6_PKTOPTIONS
1480 looks not very well thought. For now we latch
1481 options, received in the last packet, enqueued
1482 by tcp. Feel free to propose better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001483 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 */
1485 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001486 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487
1488 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet5d299f32012-08-06 05:09:33 +00001489 struct dst_entry *dst = sk->sk_rx_dst;
1490
Tom Herbertbdeab992011-08-14 19:45:55 +00001491 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001492 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001493 if (dst) {
1494 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
Brian Vazquezbbd807d2021-02-01 17:41:32 +00001495 INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
1496 dst, np->rx_dst_cookie) == NULL) {
Eric Dumazet5d299f32012-08-06 05:09:33 +00001497 dst_release(dst);
1498 sk->sk_rx_dst = NULL;
1499 }
1500 }
1501
Yafang Shao3d97d882018-05-29 23:27:31 +08001502 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503 if (opt_skb)
1504 goto ipv6_pktoptions;
1505 return 0;
1506 }
1507
Eric Dumazet12e25e12015-06-03 23:49:21 -07001508 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 goto csum_err;
1510
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001511 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001512 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1513
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 if (!nsk)
1515 goto discard;
1516
Weilong Chen4c99aa42013-12-19 18:44:34 +08001517 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 if (tcp_child_process(sk, nsk, skb))
1519 goto reset;
1520 if (opt_skb)
1521 __kfree_skb(opt_skb);
1522 return 0;
1523 }
Neil Horman47482f132011-04-06 13:07:09 -07001524 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001525 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001527 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (opt_skb)
1530 goto ipv6_pktoptions;
1531 return 0;
1532
1533reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001534 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535discard:
1536 if (opt_skb)
1537 __kfree_skb(opt_skb);
1538 kfree_skb(skb);
1539 return 0;
1540csum_err:
Jakub Kicinski709c0312021-05-14 13:04:25 -07001541 trace_tcp_bad_csum(skb);
Eric Dumazetc10d9312016-04-29 14:16:47 -07001542 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1543 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001544 goto discard;
1545
1546
1547ipv6_pktoptions:
1548 /* Do you ask, what is it?
1549
1550 1. skb was enqueued by tcp.
1551 2. skb is added to tail of read queue, rather than out of order.
1552 3. socket is not in passive state.
1553 4. Finally, it really contains options, which user wants to receive.
1554 */
1555 tp = tcp_sk(sk);
1556 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1557 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001558 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001559 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001560 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001561 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001562 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001563 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001564 if (np->repflow)
1565 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001566 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001568 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 opt_skb = xchg(&np->pktoptions, opt_skb);
1570 } else {
1571 __kfree_skb(opt_skb);
1572 opt_skb = xchg(&np->pktoptions, NULL);
1573 }
1574 }
1575
Wei Yongjun800d55f2009-02-23 21:45:33 +00001576 kfree_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 return 0;
1578}
1579
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001580static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1581 const struct tcphdr *th)
1582{
1583 /* This is tricky: we move IP6CB at its correct location into
1584 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1585 * _decode_session6() uses IP6CB().
1586 * barrier() makes sure compiler won't play aliasing games.
1587 */
1588 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1589 sizeof(struct inet6_skb_parm));
1590 barrier();
1591
1592 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1593 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1594 skb->len - th->doff*4);
1595 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1596 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1597 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1598 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1599 TCP_SKB_CB(skb)->sacked = 0;
Mike Maloney98aaa912017-08-22 17:08:48 -04001600 TCP_SKB_CB(skb)->has_rxtstamp =
1601 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001602}
1603
Paolo Abeni0e219ae2019-05-03 17:01:37 +02001604INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001605{
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001606 struct sk_buff *skb_to_free;
David Ahern4297a0e2017-08-07 08:44:21 -07001607 int sdif = inet6_sdif(skb);
David Ahernd14c77e2019-12-30 14:14:26 -08001608 int dif = inet6_iif(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001609 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001610 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001611 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612 struct sock *sk;
1613 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001614 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
1616 if (skb->pkt_type != PACKET_HOST)
1617 goto discard_it;
1618
1619 /*
1620 * Count it even if it's bad.
1621 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001622 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001623
1624 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1625 goto discard_it;
1626
Eric Dumazetea1627c2016-05-13 09:16:40 -07001627 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
Eric Dumazetea1627c2016-05-13 09:16:40 -07001629 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630 goto bad_packet;
1631 if (!pskb_may_pull(skb, th->doff*4))
1632 goto discard_it;
1633
Tom Herberte4f45b72014-05-02 16:29:51 -07001634 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001635 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001636
Eric Dumazetea1627c2016-05-13 09:16:40 -07001637 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001638 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001640lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001641 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
David Ahern4297a0e2017-08-07 08:44:21 -07001642 th->source, th->dest, inet6_iif(skb), sdif,
Eric Dumazet3b24d852016-04-01 08:52:17 -07001643 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644 if (!sk)
1645 goto no_tcp_socket;
1646
1647process:
1648 if (sk->sk_state == TCP_TIME_WAIT)
1649 goto do_time_wait;
1650
Eric Dumazet079096f2015-10-02 11:43:32 -07001651 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1652 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001653 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001654 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001655
1656 sk = req->rsk_listener;
David Ahernd14c77e2019-12-30 14:14:26 -08001657 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001658 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001659 reqsk_put(req);
1660 goto discard_it;
1661 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001662 if (tcp_checksum_complete(skb)) {
1663 reqsk_put(req);
1664 goto csum_error;
1665 }
Eric Dumazet77166822016-02-18 05:39:18 -08001666 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Eric Dumazetf03f2e12015-10-14 11:16:27 -07001667 inet_csk_reqsk_queue_drop_and_put(sk, req);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001668 goto lookup;
1669 }
Eric Dumazet77166822016-02-18 05:39:18 -08001670 sock_hold(sk);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001671 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001672 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001673 if (!tcp_filter(sk, skb)) {
1674 th = (const struct tcphdr *)skb->data;
1675 hdr = ipv6_hdr(skb);
1676 tcp_v6_fill_cb(skb, hdr, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001677 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001678 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001679 if (!nsk) {
1680 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001681 if (req_stolen) {
1682 /* Another cpu got exclusive access to req
1683 * and created a full blown socket.
1684 * Try to feed this packet to this socket
1685 * instead of discarding it.
1686 */
1687 tcp_v6_restore_cb(skb);
1688 sock_put(sk);
1689 goto lookup;
1690 }
Eric Dumazet77166822016-02-18 05:39:18 -08001691 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001692 }
1693 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001694 reqsk_put(req);
1695 tcp_v6_restore_cb(skb);
1696 } else if (tcp_child_process(sk, nsk, skb)) {
1697 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001698 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001699 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001700 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001701 return 0;
1702 }
1703 }
Eric Dumazet93a77c12019-03-19 07:01:08 -07001704 if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001705 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -07001706 goto discard_and_relse;
1707 }
1708
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1710 goto discard_and_relse;
1711
David Ahernd14c77e2019-12-30 14:14:26 -08001712 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001713 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001714
Eric Dumazetac6e7802016-11-10 13:12:35 -08001715 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001717 th = (const struct tcphdr *)skb->data;
1718 hdr = ipv6_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001719 tcp_v6_fill_cb(skb, hdr, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
1721 skb->dev = NULL;
1722
Eric Dumazete994b2f2015-10-02 11:43:39 -07001723 if (sk->sk_state == TCP_LISTEN) {
1724 ret = tcp_v6_do_rcv(sk, skb);
1725 goto put_and_return;
1726 }
1727
1728 sk_incoming_cpu_update(sk);
1729
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001730 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001731 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 ret = 0;
1733 if (!sock_owned_by_user(sk)) {
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001734 skb_to_free = sk->sk_rx_skb_cache;
1735 sk->sk_rx_skb_cache = NULL;
Florian Westphale7942d02017-07-30 03:57:18 +02001736 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001737 } else {
1738 if (tcp_add_backlog(sk, skb))
1739 goto discard_and_relse;
1740 skb_to_free = NULL;
Zhu Yi6b03a532010-03-04 18:01:41 +00001741 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742 bh_unlock_sock(sk);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001743 if (skb_to_free)
1744 __kfree_skb(skb_to_free);
Eric Dumazete994b2f2015-10-02 11:43:39 -07001745put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001746 if (refcounted)
1747 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 return ret ? -1 : 0;
1749
1750no_tcp_socket:
1751 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1752 goto discard_it;
1753
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001754 tcp_v6_fill_cb(skb, hdr, th);
1755
Eric Dumazet12e25e12015-06-03 23:49:21 -07001756 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001757csum_error:
Jakub Kicinski709c0312021-05-14 13:04:25 -07001758 trace_tcp_bad_csum(skb);
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001759 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001761 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001763 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764 }
1765
1766discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 kfree_skb(skb);
1768 return 0;
1769
1770discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001771 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001772 if (refcounted)
1773 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 goto discard_it;
1775
1776do_time_wait:
1777 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001778 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 goto discard_it;
1780 }
1781
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001782 tcp_v6_fill_cb(skb, hdr, th);
1783
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001784 if (tcp_checksum_complete(skb)) {
1785 inet_twsk_put(inet_twsk(sk));
1786 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 }
1788
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001789 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 case TCP_TW_SYN:
1791 {
1792 struct sock *sk2;
1793
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001794 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001795 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001796 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001797 &ipv6_hdr(skb)->daddr,
David Ahern24b711e2018-07-19 12:41:18 -07001798 ntohs(th->dest),
1799 tcp_v6_iif_l3_slave(skb),
David Ahern4297a0e2017-08-07 08:44:21 -07001800 sdif);
Ian Morris53b24b82015-03-29 14:00:05 +01001801 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001802 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001803 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001805 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001806 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001807 goto process;
1808 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 }
Gustavo A. R. Silva275757e62017-10-16 16:36:52 -05001810 /* to ACK */
Joe Perchesa8eceea2020-03-12 15:50:22 -07001811 fallthrough;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 case TCP_TW_ACK:
1813 tcp_v6_timewait_ack(sk, skb);
1814 break;
1815 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001816 tcp_v6_send_reset(sk, skb);
1817 inet_twsk_deschedule_put(inet_twsk(sk));
1818 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001819 case TCP_TW_SUCCESS:
1820 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821 }
1822 goto discard_it;
1823}
1824
Paolo Abeni97ff7ff2019-05-03 17:01:38 +02001825INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
Eric Dumazetc7109982012-07-26 12:18:11 +00001826{
1827 const struct ipv6hdr *hdr;
1828 const struct tcphdr *th;
1829 struct sock *sk;
1830
1831 if (skb->pkt_type != PACKET_HOST)
1832 return;
1833
1834 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1835 return;
1836
1837 hdr = ipv6_hdr(skb);
1838 th = tcp_hdr(skb);
1839
1840 if (th->doff < sizeof(struct tcphdr) / 4)
1841 return;
1842
Eric Dumazet870c3152014-10-17 09:17:20 -07001843 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001844 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1845 &hdr->saddr, th->source,
1846 &hdr->daddr, ntohs(th->dest),
David Ahern4297a0e2017-08-07 08:44:21 -07001847 inet6_iif(skb), inet6_sdif(skb));
Eric Dumazetc7109982012-07-26 12:18:11 +00001848 if (sk) {
1849 skb->sk = sk;
1850 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001851 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001852 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001853
Eric Dumazetc7109982012-07-26 12:18:11 +00001854 if (dst)
Eric Dumazet93a77c12019-03-19 07:01:08 -07001855 dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001856 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001857 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001858 skb_dst_set_noref(skb, dst);
1859 }
1860 }
1861}
1862
David S. Millerccb7c412010-12-01 18:09:13 -08001863static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1864 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1865 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001866 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001867};
1868
Eric Dumazetdd2e0b82020-06-19 12:12:35 -07001869INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1870{
1871 struct ipv6_pinfo *np = inet6_sk(sk);
1872
1873 __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
1874}
1875
Mat Martineau35b2c322020-01-09 07:59:21 -08001876const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001877 .queue_xmit = inet6_csk_xmit,
1878 .send_check = tcp_v6_send_check,
1879 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001880 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001881 .conn_request = tcp_v6_conn_request,
1882 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001883 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001884 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001885 .setsockopt = ipv6_setsockopt,
1886 .getsockopt = ipv6_getsockopt,
1887 .addr2sockaddr = inet6_csk_addr2sockaddr,
1888 .sockaddr_len = sizeof(struct sockaddr_in6),
Neal Cardwell4fab9072014-08-14 12:40:05 -04001889 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890};
1891
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001892#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001893static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001894 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001895 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001896 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001897};
David S. Millera9286302006-11-14 19:53:22 -08001898#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001899
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900/*
1901 * TCP over IPv4 via INET6 API
1902 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001903static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001904 .queue_xmit = ip_queue_xmit,
1905 .send_check = tcp_v4_send_check,
1906 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001907 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001908 .conn_request = tcp_v6_conn_request,
1909 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001910 .net_header_len = sizeof(struct iphdr),
1911 .setsockopt = ipv6_setsockopt,
1912 .getsockopt = ipv6_getsockopt,
1913 .addr2sockaddr = inet6_csk_addr2sockaddr,
1914 .sockaddr_len = sizeof(struct sockaddr_in6),
Neal Cardwell4fab9072014-08-14 12:40:05 -04001915 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916};
1917
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001918#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001919static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001920 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001921 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001922 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001923};
David S. Millera9286302006-11-14 19:53:22 -08001924#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001925
Linus Torvalds1da177e2005-04-16 15:20:36 -07001926/* NOTE: A lot of things set to zero explicitly by call to
1927 * sk_alloc() so need not be done here.
1928 */
1929static int tcp_v6_init_sock(struct sock *sk)
1930{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001931 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932
Neal Cardwell900f65d2012-04-19 09:55:21 +00001933 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001935 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001937#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001938 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001939#endif
1940
Linus Torvalds1da177e2005-04-16 15:20:36 -07001941 return 0;
1942}
1943
Brian Haley7d06b2e2008-06-14 17:04:49 -07001944static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001946 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001947 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948}
1949
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001950#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001951/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001952static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001953 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001955 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001956 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1957 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
1959 if (ttd < 0)
1960 ttd = 0;
1961
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962 seq_printf(seq,
1963 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001964 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 i,
1966 src->s6_addr32[0], src->s6_addr32[1],
1967 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001968 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 dest->s6_addr32[0], dest->s6_addr32[1],
1970 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001971 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001973 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001974 1, /* timers active (only the expire timer) */
1975 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00001976 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001977 from_kuid_munged(seq_user_ns(seq),
1978 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001979 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980 0, /* open_requests have no inode */
1981 0, req);
1982}
1983
/* Emit one /proc/net/tcp6 line for a full socket (listener or
 * established/derived state). Runs without the socket lock, hence the
 * READ_ONCE() accessors and the tolerance for transient inconsistency.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest  = &sp->sk_v6_daddr;
	src   = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp  = ntohs(inet->inet_sport);

	/* Classify the pending timer the same way tcp(4)'s "tm->when"
	 * column expects: 1 = retransmit-like, 4 = zero-window probe,
	 * 2 = keepalive, 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	/* For listeners the "rx queue" is the accept backlog length. */
	if (state == TCP_LISTEN)
		rx_queue = READ_ONCE(sp->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   READ_ONCE(tp->write_seq) - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
2055
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002056static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002057 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058{
Eric Dumazet789f5582015-04-12 18:51:09 -07002059 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00002060 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062
Eric Dumazetefe42082013-10-03 15:42:29 -07002063 dest = &tw->tw_v6_daddr;
2064 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002065 destp = ntohs(tw->tw_dport);
2066 srcp = ntohs(tw->tw_sport);
2067
2068 seq_printf(seq,
2069 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00002070 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 i,
2072 src->s6_addr32[0], src->s6_addr32[1],
2073 src->s6_addr32[2], src->s6_addr32[3], srcp,
2074 dest->s6_addr32[0], dest->s6_addr32[1],
2075 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2076 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00002077 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Reshetova, Elena41c6d652017-06-30 13:08:01 +03002078 refcount_read(&tw->tw_refcnt), tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079}
2080
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081static int tcp6_seq_show(struct seq_file *seq, void *v)
2082{
2083 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002084 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085
2086 if (v == SEQ_START_TOKEN) {
2087 seq_puts(seq,
2088 " sl "
2089 "local_address "
2090 "remote_address "
2091 "st tx_queue rx_queue tr tm->when retrnsmt"
2092 " uid timeout inode\n");
2093 goto out;
2094 }
2095 st = seq->private;
2096
Eric Dumazet079096f2015-10-02 11:43:32 -07002097 if (sk->sk_state == TCP_TIME_WAIT)
2098 get_timewait6_sock(seq, v, st->num);
2099 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07002100 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07002101 else
2102 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103out:
2104 return 0;
2105}
2106
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002107static const struct seq_operations tcp6_seq_ops = {
2108 .show = tcp6_seq_show,
2109 .start = tcp_seq_start,
2110 .next = tcp_seq_next,
2111 .stop = tcp_seq_stop,
2112};
2113
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002116};
2117
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002118int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119{
Christoph Hellwigc3506372018-04-10 19:42:55 +02002120 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2121 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002122 return -ENOMEM;
2123 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124}
2125
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07002126void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002128 remove_proc_entry("tcp6", net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129}
2130#endif
2131
/* The AF_INET6 SOCK_STREAM protocol: mostly the shared TCP
 * implementation, with the IPv6-specific init/destroy/connect/hash
 * entry points plugged in.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= tcp_bpf_update_proto,
#endif
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179
David Aherna8e3bb32017-08-28 15:14:20 -07002180/* thinking of making this const? Don't.
2181 * early_demux can change based on sysctl.
2182 */
Julia Lawall39294c32017-08-01 18:27:28 +02002183static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00002184 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06002185 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186 .handler = tcp_v6_rcv,
2187 .err_handler = tcp_v6_err,
2188 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2189};
2190
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191static struct inet_protosw tcpv6_protosw = {
2192 .type = SOCK_STREAM,
2193 .protocol = IPPROTO_TCP,
2194 .prot = &tcpv6_prot,
2195 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002196 .flags = INET_PROTOSW_PERMANENT |
2197 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198};
2199
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002200static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002201{
Denis V. Lunev56772422008-04-03 14:28:30 -07002202 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2203 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002204}
2205
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002206static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002207{
Denis V. Lunev56772422008-04-03 14:28:30 -07002208 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002209}
2210
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002211static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002212{
Haishuang Yan1946e672016-12-28 17:52:32 +08002213 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002214}
2215
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2221
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002222int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002224 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002225
Vlad Yasevich33362882012-11-15 08:49:15 +00002226 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2227 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00002228 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00002229
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002230 /* register inet6 protocol */
2231 ret = inet6_register_protosw(&tcpv6_protosw);
2232 if (ret)
2233 goto out_tcpv6_protocol;
2234
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002235 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002236 if (ret)
2237 goto out_tcpv6_protosw;
Mat Martineauf870fa02020-01-21 16:56:15 -08002238
2239 ret = mptcpv6_init();
2240 if (ret)
2241 goto out_tcpv6_pernet_subsys;
2242
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002243out:
2244 return ret;
2245
Mat Martineauf870fa02020-01-21 16:56:15 -08002246out_tcpv6_pernet_subsys:
2247 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002248out_tcpv6_protosw:
2249 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002250out_tcpv6_protocol:
2251 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002252 goto out;
2253}
2254
Daniel Lezcano09f77092007-12-13 05:34:58 -08002255void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002256{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002257 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002258 inet6_unregister_protosw(&tcpv6_protosw);
2259 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260}