blob: 075ee8a2df3b7f3759f69f1b1256f2e8c9c700c1 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09009 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 */
21
Herbert Xueb4dea52008-12-29 23:04:08 -080022#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/jiffies.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/init.h>
34#include <linux/jhash.h>
35#include <linux/ipsec.h>
36#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080038#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
Paolo Abeni0e219ae2019-05-03 17:01:37 +020042#include <linux/indirect_call_wrapper.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/tcp.h>
45#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030046#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080047#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <net/ipv6.h>
49#include <net/transp_v6.h>
50#include <net/addrconf.h>
51#include <net/ip6_route.h>
52#include <net/ip6_checksum.h>
53#include <net/inet_ecn.h>
54#include <net/protocol.h>
55#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/snmp.h>
57#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080058#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070059#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070060#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030061#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063#include <linux/proc_fs.h>
64#include <linux/seq_file.h>
65
Herbert Xucf80e0e2016-01-24 21:20:23 +080066#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080067#include <linux/scatterlist.h>
68
Song Liuc24b14c2017-10-23 09:20:24 -070069#include <trace/events/tcp.h>
70
Eric Dumazeta00e7442015-09-29 07:42:39 -070071static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070073 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
Eric Dumazetd2489c72021-11-15 11:02:41 -080075INDIRECT_CALLABLE_SCOPE int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
Stephen Hemminger3b401a82009-09-01 19:25:04 +000077static const struct inet_connection_sock_af_ops ipv6_mapped;
Mat Martineau35b2c322020-01-09 07:59:21 -080078const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080079#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +000080static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090082#else
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	/* CONFIG_TCP_MD5SIG is disabled: no MD5 keys can ever be
	 * configured, so every lookup misses.
	 */
	return NULL;
}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Eric Dumazet93a77c12019-03-19 07:01:08 -070091/* Helper returning the inet6 address from a given tcp socket.
92 * It can be used in TCP stack instead of inet6_sk(sk).
93 * This avoids a dereference and allow compiler optimizations.
Eric Dumazetf5d54762019-04-01 03:09:20 -070094 * It is a specialized version of inet6_sk_generic().
Eric Dumazet93a77c12019-03-19 07:01:08 -070095 */
96static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
97{
Eric Dumazetf5d54762019-04-01 03:09:20 -070098 unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
Eric Dumazet93a77c12019-03-19 07:01:08 -070099
Eric Dumazetf5d54762019-04-01 03:09:20 -0700100 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700101}
102
/* Cache skb's dst on @sk.  The entry is only stored if a reference
 * could be taken (dst_hold_safe()); alongside it we record the incoming
 * ifindex and the rt6 cookie, which rt6_get_cookie() derives from the
 * route — presumably used later to revalidate the cached dst
 * (NOTE(review): validation happens outside this chunk; confirm against
 * the rx fast path).
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		/* Publish with rcu_assign_pointer() so lockless readers
		 * of sk->sk_rx_dst see a fully-initialised dst.
		 */
		rcu_assign_pointer(sk->sk_rx_dst, dst);
		sk->sk_rx_dst_ifindex = skb->skb_iif;
		sk->sk_rx_dst_cookie = rt6_get_cookie(rt);
	}
}
115
Eric Dumazet84b114b2017-05-05 06:56:54 -0700116static u32 tcp_v6_init_seq(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117{
Eric Dumazet84b114b2017-05-05 06:56:54 -0700118 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122}
123
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700124static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
Eric Dumazet84b114b2017-05-05 06:56:54 -0700125{
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700126 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700127 ipv6_hdr(skb)->saddr.s6_addr32);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128}
129
/* BPF_CGROUP_INET6_CONNECT hook, run with the socket lock held before
 * tcp_v6_connect().  Returns 0 or the (negative) verdict of the
 * attached BPF program.
 */
static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
			      int addr_len)
{
	/* This check is replicated from tcp_v6_connect() and intended to
	 * prevent BPF program called below from accessing bytes that are out
	 * of the bound specified by user in addr_len.
	 */
	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	sock_owned_by_me(sk);

	return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
}
144
/* Connect @sk to the IPv6 (or v4-mapped IPv4) destination in @uaddr.
 * Returns 0 on success — including the case where TCP Fast Open defers
 * the actual connect — and a negative errno otherwise.  For v4-mapped
 * destinations the socket is rewired to the IPv4 operations and handed
 * off to tcp_v4_connect().
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	/* If IPV6_FLOWINFO_SEND is enabled, take the flow label from the
	 * caller-supplied sin6_flowinfo and verify that a non-zero label
	 * was actually leased via a flow-label socket lookup.
	 */
	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (IS_ERR(flowlabel))
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Changing the destination invalidates remembered timestamp
	 * state and the cached write sequence.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		WRITE_ONCE(tp->write_seq, 0);
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Switch the socket to the v4-mapped operations before
		 * delegating to tcp_v4_connect() ...
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(sk))
			mptcpv6_handle_mapped(sk, true);
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* ... and undo the rewiring on failure. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			if (sk_is_mptcp(sk))
				mptcpv6_handle_mapped(sk, false);
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	/* Build the flow description and route the connection. */
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));

	dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	/* No bound source address: adopt the one route selection chose. */
	if (!saddr) {
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	/* Pick the secure ISN and timestamp offset, unless repair mode
	 * restored them already.
	 */
	if (likely(!tp->repair)) {
		if (!tp->write_seq)
			WRITE_ONCE(tp->write_seq,
				   secure_tcpv6_seq(np->saddr.s6_addr32,
						    sk->sk_v6_daddr.s6_addr32,
						    inet->inet_sport,
						    inet->inet_dport));
		tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
						   np->saddr.s6_addr32,
						   sk->sk_v6_daddr.s6_addr32);
	}

	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
347
/* React to a PMTU reduction stored in tp->mtu_info (written by
 * tcp_v6_err() under WRITE_ONCE): shrink the MSS and retransmit if the
 * new path MTU is smaller than what we cached.
 */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	u32 mtu;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	/* Paired with the WRITE_ONCE() in tcp_v6_err(). */
	mtu = READ_ONCE(tcp_sk(sk)->mtu_info);

	/* Drop requests trying to increase our current mss.
	 * Check done in __ip6_rt_update_pmtu() is too late.
	 */
	if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
		return;

	dst = inet6_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
373
/* ICMPv6 error handler for TCP: dispatch an incoming ICMPv6 error
 * (@type/@code, extra data in @info) to the socket matching the TCP
 * segment embedded at @offset in @skb.  Returns -ENOENT when no
 * established socket matches, 0 otherwise.
 */
static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		      u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex, inet6_sdif(skb));

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return -ENOENT;
	}

	/* TIME_WAIT and NEW_SYN_RECV sockets get special, lock-free
	 * treatment and release their reference immediately.
	 */
	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return 0;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		tcp_req_err(sk, seq, fatal);
		return 0;
	}

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	if (static_branch_unlikely(&ip6_min_hopcount)) {
		/* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
		if (ipv6_hdr(skb)->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
			__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
			goto out;
		}
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = rcu_dereference(tp->fastopen_rsk);
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	/* Ignore errors referencing sequence numbers outside our send
	 * window — presumably stale or forged ICMP.
	 */
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = tcp_inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		u32 mtu = ntohl(info);

		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		if (mtu < IPV6_MIN_MTU)
			goto out;

		/* Paired with the READ_ONCE() in tcp_v6_mtu_reduced(). */
		WRITE_ONCE(tp->mtu_info, mtu);

		/* Handle now if we own the socket, otherwise defer to
		 * release_sock() via the TSQ flag, holding a reference.
		 */
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk_error_report(sk);		/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	case TCP_LISTEN:
		break;
	default:
		/* check if this ICMP message allows revert of backoff.
		 * (see RFC 6069)
		 */
		if (!fastopen && type == ICMPV6_DEST_UNREACH &&
		    code == ICMPV6_NOROUTE)
			tcp_ld_RTO_revert(sk, seq);
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
	return 0;
}
516
517
/* Build and transmit a SYN-ACK for @req.  @dst may be a precomputed
 * route; if NULL one is looked up via inet6_csk_route_req().  Returns
 * the net_xmit_eval()'d result of ip6_xmit(), or -ENOMEM when no route
 * or no SYN-ACK skb could be obtained.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type,
			      struct sk_buff *syn_skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;
	u8 tclass;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		/* Reflect the peer's flow label when IPV6_FLOWINFO
		 * reflection (np->repflow) is enabled.
		 */
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		/* With sysctl_tcp_reflect_tos, echo the DSCP bits of the
		 * received SYN but keep our own ECN bits; otherwise use
		 * the socket's tclass unchanged.
		 */
		tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
				(tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
				(np->tclass & INET_ECN_MASK) :
				np->tclass;

		if (!INET_ECN_is_capable(tclass) &&
		    tcp_bpf_ca_needs_ecn((struct sock *)req))
			tclass |= INET_ECN_ECT_0;

		/* np->opt is RCU-protected; ireq->ipv6_opt, if set,
		 * takes precedence.
		 */
		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, skb->mark ? : sk->sk_mark, opt,
			       tclass, sk->sk_priority);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
570
Octavian Purdila72659ec2010-01-17 19:09:39 -0800571
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700572static void tcp_v6_reqsk_destructor(struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573{
Huw Davies56ac42b2016-06-27 15:05:28 -0400574 kfree(inet_rsk(req)->ipv6_opt);
Eric Dumazet12c86912021-10-25 09:48:25 -0700575 consume_skb(inet_rsk(req)->pktopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576}
577
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800578#ifdef CONFIG_TCP_MD5SIG
/* Look up the MD5 key configured on @sk for peer address @addr,
 * scoped to L3 domain @l3index.  Thin AF_INET6 wrapper around
 * tcp_md5_do_lookup().
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr,
						   int l3index)
{
	return tcp_md5_do_lookup(sk, l3index,
				 (union tcp_md5_addr *)addr, AF_INET6);
}
586
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700587static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700588 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800589{
David Aherndea53bb2019-12-30 14:14:28 -0800590 int l3index;
591
592 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
593 addr_sk->sk_bound_dev_if);
594 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
595 l3index);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800596}
597
/* Handle the TCP_MD5SIG / TCP_MD5SIG_EXT setsockopt()s for an IPv6 socket.
 *
 * Copies a struct tcp_md5sig from user space, validates it, and either
 * deletes (tcpm_keylen == 0) or adds/replaces an MD5 key for the given
 * peer address.  V4-mapped IPv6 addresses are stored as AF_INET keys so
 * they match packets arriving over IPv4.
 *
 * Returns 0 on success or a negative errno (-EINVAL on malformed input,
 * -EFAULT if the copy from user space fails, or whatever
 * tcp_md5_do_add()/tcp_md5_do_del() return).
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
				 sockptr_t optval, int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
	int l3index = 0;
	u8 prefixlen;
	u8 flags;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	/* Only the IFINDEX flag is stored with the key; other flag bits
	 * are consumed during parsing below.
	 */
	flags = cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX;

	if (optname == TCP_MD5SIG_EXT &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
		prefixlen = cmd.tcpm_prefixlen;
		/* A v4-mapped address can carry at most a /32 prefix. */
		if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
					prefixlen > 32))
			return -EINVAL;
	} else {
		/* No explicit prefix: key matches the full host address. */
		prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
	}

	if (optname == TCP_MD5SIG_EXT && cmd.tcpm_ifindex &&
	    cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
		struct net_device *dev;

		rcu_read_lock();
		dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
		if (dev && netif_is_l3_master(dev))
			l3index = dev->ifindex;
		rcu_read_unlock();

		/* ok to reference set/not set outside of rcu;
		 * right now device MUST be an L3 master
		 */
		if (!dev || !l3index)
			return -EINVAL;
	}

	/* Zero key length means "delete the key for this address". */
	if (!cmd.tcpm_keylen) {
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET, prefixlen,
					      l3index, flags);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6, prefixlen, l3index, flags);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	/* Add (or replace) the key; v4-mapped addresses use the embedded
	 * IPv4 address (last 32 bits) as an AF_INET key.
	 */
	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, prefixlen, l3index, flags,
				      cmd.tcpm_key, cmd.tcpm_keylen,
				      GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, prefixlen, l3index, flags,
			      cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
667
Eric Dumazet19689e32016-06-27 18:51:53 +0200668static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
669 const struct in6_addr *daddr,
670 const struct in6_addr *saddr,
671 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800672{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800673 struct tcp6_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -0700674 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +0200675 struct tcphdr *_th;
YOSHIFUJI Hideaki8d26d762008-04-17 13:19:16 +0900676
Eric Dumazet19689e32016-06-27 18:51:53 +0200677 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800678 /* 1. TCP pseudo-header (RFC2460) */
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000679 bp->saddr = *saddr;
680 bp->daddr = *daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700681 bp->protocol = cpu_to_be32(IPPROTO_TCP);
Adam Langley00b13042008-07-31 21:36:07 -0700682 bp->len = cpu_to_be32(nbytes);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800683
Eric Dumazet19689e32016-06-27 18:51:53 +0200684 _th = (struct tcphdr *)(bp + 1);
685 memcpy(_th, th, sizeof(*th));
686 _th->check = 0;
687
688 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
689 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
690 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +0800691 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -0700692}
David S. Millerc7da57a2007-10-26 00:41:21 -0700693
Eric Dumazet19689e32016-06-27 18:51:53 +0200694static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000695 const struct in6_addr *daddr, struct in6_addr *saddr,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400696 const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -0700697{
698 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800699 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -0700700
701 hp = tcp_get_md5sig_pool();
702 if (!hp)
703 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800704 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -0700705
Herbert Xucf80e0e2016-01-24 21:20:23 +0800706 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700707 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +0200708 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -0700709 goto clear_hash;
710 if (tcp_md5_hash_key(hp, key))
711 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800712 ahash_request_set_crypt(req, NULL, md5_hash, 0);
713 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800714 goto clear_hash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800715
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800716 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800717 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -0700718
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800719clear_hash:
720 tcp_put_md5sig_pool();
721clear_hash_noput:
722 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -0700723 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800724}
725
Eric Dumazet39f8e582015-03-24 15:58:55 -0700726static int tcp_v6_md5_hash_skb(char *md5_hash,
727 const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400728 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400729 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800730{
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000731 const struct in6_addr *saddr, *daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700732 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800733 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400734 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800735
Eric Dumazet39f8e582015-03-24 15:58:55 -0700736 if (sk) { /* valid for establish/request sockets */
737 saddr = &sk->sk_v6_rcv_saddr;
Eric Dumazetefe42082013-10-03 15:42:29 -0700738 daddr = &sk->sk_v6_daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700739 } else {
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000740 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
Adam Langley49a72df2008-07-19 00:01:42 -0700741 saddr = &ip6h->saddr;
742 daddr = &ip6h->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800743 }
Adam Langley49a72df2008-07-19 00:01:42 -0700744
745 hp = tcp_get_md5sig_pool();
746 if (!hp)
747 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800748 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -0700749
Herbert Xucf80e0e2016-01-24 21:20:23 +0800750 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700751 goto clear_hash;
752
Eric Dumazet19689e32016-06-27 18:51:53 +0200753 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -0700754 goto clear_hash;
755 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
756 goto clear_hash;
757 if (tcp_md5_hash_key(hp, key))
758 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800759 ahash_request_set_crypt(req, NULL, md5_hash, 0);
760 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700761 goto clear_hash;
762
763 tcp_put_md5sig_pool();
764 return 0;
765
766clear_hash:
767 tcp_put_md5sig_pool();
768clear_hash_noput:
769 memset(md5_hash, 0, 16);
770 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800771}
772
Eric Dumazetba8e2752015-10-02 11:43:28 -0700773#endif
774
/* Verify the TCP-MD5 signature of an inbound segment against the key
 * configured on @sk for the sender's address (L3/VRF scoped).
 *
 * Returns true if the packet must be dropped: a key is expected but the
 * option is absent, the option is present but no key is configured, or
 * the computed digest does not match.  Returns false (accept) when
 * neither side uses MD5, when the digest matches, or unconditionally
 * if CONFIG_TCP_MD5SIG is off.
 */
static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
				    const struct sk_buff *skb,
				    int dif, int sdif)
{
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct tcp_md5sig_key *hash_expected;
	const struct ipv6hdr *ip6h = ipv6_hdr(skb);
	const struct tcphdr *th = tcp_hdr(skb);
	int genhash, l3index;
	u8 newhash[16];

	/* sdif set, means packet ingressed via a device
	 * in an L3 domain and dif is set to the l3mdev
	 */
	l3index = sdif ? dif : 0;

	hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
	hash_location = tcp_parse_md5sig_option(th);

	/* We've parsed the options - do we have a hash? */
	if (!hash_expected && !hash_location)
		return false;

	/* Key configured but peer sent no signature: drop + count. */
	if (hash_expected && !hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
		return true;
	}

	/* Peer sent a signature we have no key for: drop + count. */
	if (!hash_expected && hash_location) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
		return true;
	}

	/* check the signature */
	genhash = tcp_v6_md5_hash_skb(newhash,
				      hash_expected,
				      NULL, skb);

	if (genhash || memcmp(hash_location, newhash, 16) != 0) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
		net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
				     genhash ? "failed" : "mismatch",
				     &ip6h->saddr, ntohs(th->source),
				     &ip6h->daddr, ntohs(th->dest), l3index);
		return true;
	}
#endif
	return false;
}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800825
/* Fill the IPv6-specific fields of a freshly minted request sock from the
 * incoming SYN: remote/local addresses, inbound interface for link-local
 * peers, and (when the listener asked for any IPv6 rx options) a reference
 * to the SYN skb itself so the options can be replayed to the child.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep the SYN skb around (extra reference) if the listener wants
	 * any of the IPv6 receive options; skipped for TIME-WAIT recycled
	 * ISNs (tcp_tw_isn set).
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		refcount_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
851
/* route_req hook: initialize the request sock from the SYN, run the LSM
 * check, then compute the route for the SYN-ACK.
 *
 * Returns the dst entry for the reply, or NULL if the security module
 * rejects the connection request.
 */
static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi *fl,
					  struct request_sock *req)
{
	tcp_v6_init_req(req, sk, skb);

	if (security_inet_conn_request(sk, skb, req))
		return NULL;

	return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
}
864
/* Request-sock operations for IPv6 TCP: sizing, SYN-ACK (re)transmit,
 * final ACK/RST generation and destruction of pending requests.
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
874
/* AF-specific request-sock hooks used by the generic tcp_conn_request()
 * path for IPv6 listeners (MSS clamping, MD5 lookup/signing, syncookies,
 * routing, ISN/timestamp-offset generation and SYN-ACK transmission).
 */
const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq	=	tcp_v6_init_seq,
	.init_ts_off	=	tcp_v6_init_ts_off,
	.send_synack	=	tcp_v6_send_synack,
};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800890
/* Build and transmit a stand-alone TCP control segment (RST when @rst,
 * otherwise a bare ACK) in reply to @skb, via the per-netns IPv6 control
 * socket rather than @sk (which may be NULL, a timewait or request sock).
 *
 * Source/destination addresses are the reverse of the incoming packet.
 * Optional bits appended after the header: a timestamp option when
 * @tsecr is nonzero, an MPTCP reset option on unsigned RSTs, and an MD5
 * signature when @key is set.  The segment is dropped silently if the
 * skb allocation or route lookup fails.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label, u32 priority)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	__be32 mrst = 0, *topt;
	struct dst_entry *dst;
	__u32 mark = 0;

	/* Account for the options we will append, in emission order. */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

#ifdef CONFIG_MPTCP
	/* MPTCP reset reason option: only on RSTs that carry no MD5 sig. */
	if (rst && !key) {
		mrst = mptcp_reset_option(skb);

		if (mrst)
			tot_len += sizeof(__be32);
	}
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* RSTs replying to a segment that carried an ACK omit the ACK bit. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

	if (mrst)
		*topt++ = mrst;

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with the reply's addresses: incoming saddr is our
		 * destination and vice versa.
		 */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local destinations need a strict outgoing interface. */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	if (sk) {
		if (sk->sk_state == TCP_TIME_WAIT) {
			mark = inet_twsk(sk)->tw_mark;
			/* autoflowlabel relies on buff->hash */
			skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
				     PKT_HASH_TYPE_L4);
		} else {
			mark = sk->sk_mark;
		}
		buff->tstamp = tcp_transmit_time(sk);
	}
	/* Reply-mark sysctl overrides the socket mark when configured. */
	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));

	/* Pass a socket to ip6_dst_lookup either it is for RST
	 * Underlying function will use this to retrieve the network
	 * namespace
	 */
	dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		/* ECN bits are cleared from tclass on control segments. */
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
			 tclass & ~INET_ECN_MASK, priority);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	kfree_skb(buff);
}
1019
/* Send a RST in reply to @skb.  @sk may be NULL (no matching socket), a
 * timewait sock, a request sock or a full socket; each case contributes
 * different flow label / priority / MD5-key information.
 *
 * Never resets a reset (th->rst), and when no socket matched, only
 * replies to unicast destinations.  With TCP-MD5 enabled, the RST is
 * signed when a key is known; if only the peer presented a signature,
 * the listener is looked up by source port and the incoming segment's
 * signature must verify before any RST is emitted.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	__be32 label = 0;
	u32 priority = 0;
	struct net *net;
	int oif = 0;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

	net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		int l3index;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and inet_iif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
	} else if (hash_location) {
		int dif = tcp_v6_iif_l3_slave(skb);
		int sdif = tcp_v6_sdif(skb);
		int l3index;

		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(net,
					    &tcp_hashinfo, NULL, 0,
					    &ipv6h->saddr,
					    th->source, &ipv6h->daddr,
					    ntohs(th->source), dif, sdif);
		if (!sk1)
			goto out;

		/* sdif set, means packet ingressed via a device
		 * in an L3 domain and dif is set to it.
		 */
		l3index = tcp_v6_sdif(skb) ? dif : 0;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
		if (!key)
			goto out;

		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	/* RFC 793 reset generation: echo the peer's ACK as our SEQ, or
	 * ACK everything the offending segment occupied.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	if (sk) {
		oif = sk->sk_bound_dev_if;
		if (sk_fullsock(sk)) {
			const struct ipv6_pinfo *np = tcp_inet6_sk(sk);

			trace_tcp_send_reset(sk, skb);
			if (np->repflow)
				label = ip6_flowlabel(ipv6h);
			priority = sk->sk_priority;
		}
		if (sk->sk_state == TCP_TIME_WAIT) {
			label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
			priority = inet_twsk(sk)->tw_priority;
		}
	} else {
		/* No socket: optionally reflect the incoming flow label. */
		if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
			label = ip6_flowlabel(ipv6h);
	}

	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
			     ipv6_get_dsfield(ipv6h), label, priority);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
1126
/* Send a bare ACK in reply to @skb — a thin wrapper over
 * tcp_v6_send_response() with rst=0.  Used by the timewait and
 * request-sock ACK paths below.
 */
static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
			    u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
			    struct tcp_md5sig_key *key, u8 tclass,
			    __be32 label, u32 priority)
{
	tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
			     tclass, label, priority);
}
1135
/* ACK a segment that hit a TIME-WAIT socket, using the state preserved
 * in the timewait sock (snd_nxt/rcv_nxt, scaled window, timestamps, MD5
 * key, tclass, flow label, priority), then drop the timewait reference
 * taken by the caller.
 */
static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
	struct inet_timewait_sock *tw = inet_twsk(sk);
	struct tcp_timewait_sock *tcptw = tcp_twsk(sk);

	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);

	inet_twsk_put(tw);
}
1149
/* ACK on behalf of a pending request sock (SYN_RECV / Fast Open child),
 * signing with the listener's MD5 key for the peer when one exists.
 */
static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req)
{
	int l3index;

	l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;

	/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
	 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
	 */
	/* RFC 7323 2.3
	 * The window field (SEG.WND) of every outgoing segment, with the
	 * exception of <SYN> segments, MUST be right-shifted by
	 * Rcv.Wind.Shift bits:
	 */
	tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
			tcp_rsk(req)->rcv_nxt,
			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
			req->ts_recent, sk->sk_bound_dev_if,
			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
			ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
}
1174
1175
/* Validate a possible syncookie ACK: for non-SYN segments, let
 * cookie_v6_check() try to resurrect the connection, returning the
 * resulting child socket (or NULL on a bad cookie).  Without
 * CONFIG_SYN_COOKIES this is a pass-through of @sk.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		sk = cookie_v6_check(sk, skb);
#endif
	return sk;
}
1186
/* Generate a syncookie ISN and the MSS encoded in it for BPF helpers.
 *
 * Writes the cookie to *@cookie and returns the negotiated MSS value,
 * or 0 when no cookie could be produced (or CONFIG_SYN_COOKIES is off).
 */
u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
			 struct tcphdr *th, u32 *cookie)
{
	u16 mss = 0;
#ifdef CONFIG_SYN_COOKIES
	mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
				    &tcp_request_sock_ipv6_ops, sk, th);
	if (mss) {
		*cookie = __cookie_v6_init_sequence(iph, th, &mss);
		tcp_synq_overflow(sk);
	}
#endif
	return mss;
}
1201
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1203{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 if (skb->protocol == htons(ETH_P_IP))
1205 return tcp_v4_conn_request(sk, skb);
1206
1207 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001208 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209
Jakub Kicinskidcc32f4f2021-03-17 09:55:15 -07001210 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1211 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1212 return 0;
1213 }
1214
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001215 return tcp_conn_request(&tcp6_request_sock_ops,
1216 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001219 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 return 0; /* don't send reset */
1221}
1222
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001223static void tcp_v6_restore_cb(struct sk_buff *skb)
1224{
1225 /* We need to move header back to the beginning if xfrm6_policy_check()
1226 * and tcp_v6_fill_cb() are going to be called again.
1227 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
1228 */
1229 memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
1230 sizeof(struct inet6_skb_parm));
1231}
1232
Eric Dumazet0c271712015-09-29 07:42:48 -07001233static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
Weilong Chen4c99aa42013-12-19 18:44:34 +08001234 struct request_sock *req,
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001235 struct dst_entry *dst,
1236 struct request_sock *req_unhash,
1237 bool *own_req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001238{
Eric Dumazet634fb9792013-10-09 15:21:29 -07001239 struct inet_request_sock *ireq;
Eric Dumazet0c271712015-09-29 07:42:48 -07001240 struct ipv6_pinfo *newnp;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001241 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001242 struct ipv6_txoptions *opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 struct inet_sock *newinet;
Ricardo Dias01770a12020-11-20 11:11:33 +00001244 bool found_dup_sk = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001245 struct tcp_sock *newtp;
1246 struct sock *newsk;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001247#ifdef CONFIG_TCP_MD5SIG
1248 struct tcp_md5sig_key *key;
David Aherndea53bb2019-12-30 14:14:28 -08001249 int l3index;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001250#endif
Neal Cardwell3840a062012-06-28 12:34:19 +00001251 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
1253 if (skb->protocol == htons(ETH_P_IP)) {
1254 /*
1255 * v6 mapped
1256 */
1257
Eric Dumazet5e0724d2015-10-22 08:20:46 -07001258 newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
1259 req_unhash, own_req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260
Ian Morris63159f22015-03-29 14:00:04 +01001261 if (!newsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 return NULL;
1263
Eric Dumazet93a77c12019-03-19 07:01:08 -07001264 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265
Eric Dumazet93a77c12019-03-19 07:01:08 -07001266 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001267 newtp = tcp_sk(newsk);
1268
1269 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1270
Eric Dumazetd1e559d2015-03-18 14:05:35 -07001271 newnp->saddr = newsk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001272
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001273 inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
Peter Krystadcec37a62020-01-21 16:56:18 -08001274 if (sk_is_mptcp(newsk))
Geert Uytterhoeven31484d52020-01-30 10:45:26 +01001275 mptcpv6_handle_mapped(newsk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276 newsk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001277#ifdef CONFIG_TCP_MD5SIG
1278 newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
1279#endif
1280
WANG Cong83eadda2017-05-09 16:59:54 -07001281 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001282 newnp->ipv6_ac_list = NULL;
1283 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284 newnp->pktoptions = NULL;
1285 newnp->opt = NULL;
Eric Dumazet89e41302019-03-19 05:45:35 -07001286 newnp->mcast_oif = inet_iif(skb);
1287 newnp->mcast_hops = ip_hdr(skb)->ttl;
1288 newnp->rcv_flowinfo = 0;
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001289 if (np->repflow)
Eric Dumazet89e41302019-03-19 05:45:35 -07001290 newnp->flow_label = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001292 /*
1293 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
1294 * here, tcp_create_openreq_child now does this for us, see the comment in
1295 * that function for the gory details. -acme
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
1298 /* It is tricky place. Until this moment IPv4 tcp
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001299 worked with IPv6 icsk.icsk_af_ops.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001300 Sync it now.
1301 */
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001302 tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303
1304 return newsk;
1305 }
1306
Eric Dumazet634fb9792013-10-09 15:21:29 -07001307 ireq = inet_rsk(req);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
1309 if (sk_acceptq_is_full(sk))
1310 goto out_overflow;
1311
David S. Miller493f3772010-12-02 12:14:29 -08001312 if (!dst) {
Eric Dumazetf76b33c2015-09-29 07:42:42 -07001313 dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
David S. Miller493f3772010-12-02 12:14:29 -08001314 if (!dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315 goto out;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001316 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001317
1318 newsk = tcp_create_openreq_child(sk, req, skb);
Ian Morris63159f22015-03-29 14:00:04 +01001319 if (!newsk)
Balazs Scheidler093d2822010-10-21 13:06:43 +02001320 goto out_nonewsk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321
Arnaldo Carvalho de Meloe6848972005-08-09 19:45:38 -07001322 /*
1323 * No need to charge this sock to the relevant IPv6 refcnt debug socks
1324 * count here, tcp_create_openreq_child now does this for us, see the
1325 * comment in that function for the gory details. -acme
1326 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327
Stephen Hemminger59eed272006-08-25 15:55:43 -07001328 newsk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -08001329 ip6_dst_store(newsk, dst, NULL, NULL);
Neal Cardwellfae6ef82012-08-19 03:30:38 +00001330 inet6_sk_rx_dst_set(newsk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
Eric Dumazet93a77c12019-03-19 07:01:08 -07001332 inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
1334 newtp = tcp_sk(newsk);
1335 newinet = inet_sk(newsk);
Eric Dumazet93a77c12019-03-19 07:01:08 -07001336 newnp = tcp_inet6_sk(newsk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 memcpy(newnp, np, sizeof(struct ipv6_pinfo));
1339
Eric Dumazet634fb9792013-10-09 15:21:29 -07001340 newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
1341 newnp->saddr = ireq->ir_v6_loc_addr;
1342 newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
1343 newsk->sk_bound_dev_if = ireq->ir_iif;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001345 /* Now IPv6 options...
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346
1347 First: no IPv4 options.
1348 */
Eric Dumazetf6d8bd02011-04-21 09:45:37 +00001349 newinet->inet_opt = NULL;
WANG Cong83eadda2017-05-09 16:59:54 -07001350 newnp->ipv6_mc_list = NULL;
Yan, Zheng676a1182011-09-25 02:21:30 +00001351 newnp->ipv6_ac_list = NULL;
Masayuki Nakagawad35690b2007-03-16 16:14:03 -07001352 newnp->ipv6_fl_list = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
1354 /* Clone RX bits */
1355 newnp->rxopt.all = np->rxopt.all;
1356
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 newnp->pktoptions = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 newnp->opt = NULL;
Eric Dumazet870c3152014-10-17 09:17:20 -07001359 newnp->mcast_oif = tcp_v6_iif(skb);
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001360 newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
Florent Fourcot1397ed32013-12-08 15:46:57 +01001361 newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001362 if (np->repflow)
1363 newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
Wei Wang8ef44b62020-12-08 09:55:08 -08001365 /* Set ToS of the new socket based upon the value of incoming SYN.
1366 * ECT bits are set later in tcp_init_transfer().
1367 */
Wei Wangac8f1712020-09-09 17:50:48 -07001368 if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
1369 newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;
1370
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 /* Clone native IPv6 options from listening socket (if any)
1372
1373 Yes, keeping reference count would be much more clever,
1374 but we make one more one thing there: reattach optmem
1375 to newsk.
1376 */
Huw Davies56ac42b2016-06-27 15:05:28 -04001377 opt = ireq->ipv6_opt;
1378 if (!opt)
1379 opt = rcu_dereference(np->opt);
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001380 if (opt) {
1381 opt = ipv6_dup_options(newsk, opt);
1382 RCU_INIT_POINTER(newnp->opt, opt);
1383 }
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001384 inet_csk(newsk)->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -08001385 if (opt)
1386 inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
1387 opt->opt_flen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
Daniel Borkmann81164412015-01-05 23:57:48 +01001389 tcp_ca_openreq_child(newsk, dst);
1390
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 tcp_sync_mss(newsk, dst_mtu(dst));
Eric Dumazet3541f9e2017-02-02 08:04:56 -08001392 newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));
Neal Cardwelld135c522012-04-22 09:45:47 +00001393
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394 tcp_initialize_rcv_mss(newsk);
1395
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001396 newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
1397 newinet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001398
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001399#ifdef CONFIG_TCP_MD5SIG
David Aherndea53bb2019-12-30 14:14:28 -08001400 l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);
1401
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001402 /* Copy over the MD5 key from the original socket */
David Aherndea53bb2019-12-30 14:14:28 -08001403 key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
Ian Morris53b24b82015-03-29 14:00:05 +01001404 if (key) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001405 /* We're using one, so create a matching key
1406 * on the newsk structure. If we fail to get
1407 * memory, then we end up not copying the key
1408 * across. Shucks.
1409 */
Eric Dumazetefe42082013-10-03 15:42:29 -07001410 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
Leonard Cresteza76c2312021-10-15 10:26:05 +03001411 AF_INET6, 128, l3index, key->flags, key->key, key->keylen,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001412 sk_gfp_mask(sk, GFP_ATOMIC));
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001413 }
1414#endif
1415
Balazs Scheidler093d2822010-10-21 13:06:43 +02001416 if (__inet_inherit_port(sk, newsk) < 0) {
Christoph Paasche337e242012-12-14 04:07:58 +00001417 inet_csk_prepare_forced_close(newsk);
1418 tcp_done(newsk);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001419 goto out;
1420 }
Ricardo Dias01770a12020-11-20 11:11:33 +00001421 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
1422 &found_dup_sk);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001423 if (*own_req) {
Eric Dumazet49a496c2015-11-05 12:50:19 -08001424 tcp_move_syn(newtp, req);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001425
1426 /* Clone pktoptions received with SYN, if we own the req */
1427 if (ireq->pktopts) {
1428 newnp->pktoptions = skb_clone(ireq->pktopts,
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001429 sk_gfp_mask(sk, GFP_ATOMIC));
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001430 consume_skb(ireq->pktopts);
1431 ireq->pktopts = NULL;
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001432 if (newnp->pktoptions) {
1433 tcp_v6_restore_cb(newnp->pktoptions);
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001434 skb_set_owner_r(newnp->pktoptions, newsk);
Eric Dumazetebf6c9c2017-02-05 20:23:22 -08001435 }
Eric Dumazet805c4bc2015-11-05 11:07:13 -08001436 }
Ricardo Dias01770a12020-11-20 11:11:33 +00001437 } else {
1438 if (!req_unhash && found_dup_sk) {
1439 /* This code path should only be executed in the
1440 * syncookie case only
1441 */
1442 bh_unlock_sock(newsk);
1443 sock_put(newsk);
1444 newsk = NULL;
1445 }
Eric Dumazetce105002015-10-30 09:46:12 -07001446 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447
1448 return newsk;
1449
1450out_overflow:
Eric Dumazet02a1d6e2016-04-27 16:44:39 -07001451 __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001452out_nonewsk:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 dst_release(dst);
Balazs Scheidler093d2822010-10-21 13:06:43 +02001454out:
Eric Dumazet9caad862016-04-01 08:52:20 -07001455 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456 return NULL;
1457}
1458
Brian Vazquezbbd807d2021-02-01 17:41:32 +00001459INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
1460 u32));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461/* The socket must have it's spinlock held when we get
Eric Dumazete994b2f2015-10-02 11:43:39 -07001462 * here, unless it is a TCP_LISTEN socket.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001463 *
1464 * We have a potential double-lock case here, so even when
1465 * doing backlog processing we use the BH locking scheme.
1466 * This is because we cannot sleep with the original spinlock
1467 * held.
1468 */
Eric Dumazetd2489c72021-11-15 11:02:41 -08001469INDIRECT_CALLABLE_SCOPE
1470int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471{
Eric Dumazet93a77c12019-03-19 07:01:08 -07001472 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 struct sk_buff *opt_skb = NULL;
Eric Dumazet93a77c12019-03-19 07:01:08 -07001474 struct tcp_sock *tp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
1476 /* Imagine: socket is IPv6. IPv4 packet arrives,
1477 goes to IPv4 receive handler and backlogged.
1478 From backlog it always goes here. Kerboom...
1479 Fortunately, tcp_rcv_established and rcv_established
1480 handle them correctly, but it is not case with
1481 tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
1482 */
1483
1484 if (skb->protocol == htons(ETH_P_IP))
1485 return tcp_v4_do_rcv(sk, skb);
1486
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 /*
1488 * socket locking is here for SMP purposes as backlog rcv
1489 * is currently called with bh processing disabled.
1490 */
1491
1492 /* Do Stevens' IPV6_PKTOPTIONS.
1493
1494 Yes, guys, it is the only place in our code, where we
1495 may make it not affecting IPv4.
1496 The rest of code is protocol independent,
1497 and I do not like idea to uglify IPv4.
1498
1499 Actually, all the idea behind IPV6_PKTOPTIONS
1500 looks not very well thought. For now we latch
1501 options, received in the last packet, enqueued
1502 by tcp. Feel free to propose better solution.
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001503 --ANK (980728)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001504 */
1505 if (np->rxopt.all)
Eric Dumazet7450aaf2015-11-30 08:57:28 -08001506 opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507
1508 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
Eric Dumazet8f905c02021-12-20 06:33:30 -08001509 struct dst_entry *dst;
1510
1511 dst = rcu_dereference_protected(sk->sk_rx_dst,
1512 lockdep_sock_is_held(sk));
Eric Dumazet5d299f32012-08-06 05:09:33 +00001513
Tom Herbertbdeab992011-08-14 19:45:55 +00001514 sock_rps_save_rxhash(sk, skb);
Eric Dumazet3d973792014-11-11 05:54:27 -08001515 sk_mark_napi_id(sk, skb);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001516 if (dst) {
Eric Dumazet0c0a5ef2021-10-25 09:48:16 -07001517 if (sk->sk_rx_dst_ifindex != skb->skb_iif ||
Brian Vazquezbbd807d2021-02-01 17:41:32 +00001518 INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
Eric Dumazetef57c162021-10-25 09:48:17 -07001519 dst, sk->sk_rx_dst_cookie) == NULL) {
Eric Dumazet8f905c02021-12-20 06:33:30 -08001520 RCU_INIT_POINTER(sk->sk_rx_dst, NULL);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001521 dst_release(dst);
Eric Dumazet5d299f32012-08-06 05:09:33 +00001522 }
1523 }
1524
Yafang Shao3d97d882018-05-29 23:27:31 +08001525 tcp_rcv_established(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 if (opt_skb)
1527 goto ipv6_pktoptions;
1528 return 0;
1529 }
1530
Eric Dumazet12e25e12015-06-03 23:49:21 -07001531 if (tcp_checksum_complete(skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532 goto csum_err;
1533
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001534 if (sk->sk_state == TCP_LISTEN) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001535 struct sock *nsk = tcp_v6_cookie_check(sk, skb);
1536
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 if (!nsk)
1538 goto discard;
1539
Weilong Chen4c99aa42013-12-19 18:44:34 +08001540 if (nsk != sk) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 if (tcp_child_process(sk, nsk, skb))
1542 goto reset;
1543 if (opt_skb)
1544 __kfree_skb(opt_skb);
1545 return 0;
1546 }
Neil Horman47482f132011-04-06 13:07:09 -07001547 } else
Tom Herbertbdeab992011-08-14 19:45:55 +00001548 sock_rps_save_rxhash(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549
Eric Dumazet72ab4a82015-09-29 07:42:41 -07001550 if (tcp_rcv_state_process(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 goto reset;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 if (opt_skb)
1553 goto ipv6_pktoptions;
1554 return 0;
1555
1556reset:
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001557 tcp_v6_send_reset(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558discard:
1559 if (opt_skb)
1560 __kfree_skb(opt_skb);
1561 kfree_skb(skb);
1562 return 0;
1563csum_err:
Jakub Kicinski709c0312021-05-14 13:04:25 -07001564 trace_tcp_bad_csum(skb);
Eric Dumazetc10d9312016-04-29 14:16:47 -07001565 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1566 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 goto discard;
1568
1569
1570ipv6_pktoptions:
1571 /* Do you ask, what is it?
1572
1573 1. skb was enqueued by tcp.
1574 2. skb is added to tail of read queue, rather than out of order.
1575 3. socket is not in passive state.
1576 4. Finally, it really contains options, which user wants to receive.
1577 */
1578 tp = tcp_sk(sk);
1579 if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
1580 !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001581 if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
Eric Dumazet870c3152014-10-17 09:17:20 -07001582 np->mcast_oif = tcp_v6_iif(opt_skb);
YOSHIFUJI Hideaki333fad52005-09-08 09:59:17 +09001583 if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001584 np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
Florent Fourcot82e9f102013-12-08 15:46:59 +01001585 if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
Florent Fourcot1397ed32013-12-08 15:46:57 +01001586 np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
Florent Fourcotdf3687f2014-01-17 17:15:03 +01001587 if (np->repflow)
1588 np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
Eric Dumazeta2247722014-09-27 09:50:56 -07001589 if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 skb_set_owner_r(opt_skb, sk);
Eric Dumazet8ce48622016-10-12 19:01:45 +02001591 tcp_v6_restore_cb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 opt_skb = xchg(&np->pktoptions, opt_skb);
1593 } else {
1594 __kfree_skb(opt_skb);
1595 opt_skb = xchg(&np->pktoptions, NULL);
1596 }
1597 }
1598
Eric Dumazet12c86912021-10-25 09:48:25 -07001599 consume_skb(opt_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001600 return 0;
1601}
1602
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001603static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1604 const struct tcphdr *th)
1605{
1606 /* This is tricky: we move IP6CB at its correct location into
1607 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1608 * _decode_session6() uses IP6CB().
1609 * barrier() makes sure compiler won't play aliasing games.
1610 */
1611 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1612 sizeof(struct inet6_skb_parm));
1613 barrier();
1614
1615 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1616 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1617 skb->len - th->doff*4);
1618 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1619 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1620 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1621 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1622 TCP_SKB_CB(skb)->sacked = 0;
Mike Maloney98aaa912017-08-22 17:08:48 -04001623 TCP_SKB_CB(skb)->has_rxtstamp =
1624 skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001625}
1626
Paolo Abeni0e219ae2019-05-03 17:01:37 +02001627INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628{
David Ahern4297a0e2017-08-07 08:44:21 -07001629 int sdif = inet6_sdif(skb);
David Ahernd14c77e2019-12-30 14:14:26 -08001630 int dif = inet6_iif(skb);
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001631 const struct tcphdr *th;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001632 const struct ipv6hdr *hdr;
Eric Dumazet3b24d852016-04-01 08:52:17 -07001633 bool refcounted;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001634 struct sock *sk;
1635 int ret;
Pavel Emelyanova86b1e32008-07-16 20:20:58 -07001636 struct net *net = dev_net(skb->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637
1638 if (skb->pkt_type != PACKET_HOST)
1639 goto discard_it;
1640
1641 /*
1642 * Count it even if it's bad.
1643 */
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001644 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
1646 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1647 goto discard_it;
1648
Eric Dumazetea1627c2016-05-13 09:16:40 -07001649 th = (const struct tcphdr *)skb->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650
Eric Dumazetea1627c2016-05-13 09:16:40 -07001651 if (unlikely(th->doff < sizeof(struct tcphdr)/4))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 goto bad_packet;
1653 if (!pskb_may_pull(skb, th->doff*4))
1654 goto discard_it;
1655
Tom Herberte4f45b72014-05-02 16:29:51 -07001656 if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001657 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001658
Eric Dumazetea1627c2016-05-13 09:16:40 -07001659 th = (const struct tcphdr *)skb->data;
Stephen Hemmingere802af92010-04-22 15:24:53 -07001660 hdr = ipv6_hdr(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001662lookup:
Craig Galleka5836362016-02-10 11:50:38 -05001663 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
David Ahern4297a0e2017-08-07 08:44:21 -07001664 th->source, th->dest, inet6_iif(skb), sdif,
Eric Dumazet3b24d852016-04-01 08:52:17 -07001665 &refcounted);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666 if (!sk)
1667 goto no_tcp_socket;
1668
1669process:
1670 if (sk->sk_state == TCP_TIME_WAIT)
1671 goto do_time_wait;
1672
Eric Dumazet079096f2015-10-02 11:43:32 -07001673 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1674 struct request_sock *req = inet_reqsk(sk);
Eric Dumazete0f97592018-02-13 06:14:12 -08001675 bool req_stolen = false;
Eric Dumazet77166822016-02-18 05:39:18 -08001676 struct sock *nsk;
Eric Dumazet079096f2015-10-02 11:43:32 -07001677
1678 sk = req->rsk_listener;
David Ahernd14c77e2019-12-30 14:14:26 -08001679 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
Eric Dumazete65c3322016-08-24 08:50:24 -07001680 sk_drops_add(sk, skb);
Eric Dumazet079096f2015-10-02 11:43:32 -07001681 reqsk_put(req);
1682 goto discard_it;
1683 }
Frank van der Linden4fd44a92018-06-12 23:09:37 +00001684 if (tcp_checksum_complete(skb)) {
1685 reqsk_put(req);
1686 goto csum_error;
1687 }
Eric Dumazet77166822016-02-18 05:39:18 -08001688 if (unlikely(sk->sk_state != TCP_LISTEN)) {
Kuniyuki Iwashimad4f2c862021-06-12 21:32:20 +09001689 nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
1690 if (!nsk) {
1691 inet_csk_reqsk_queue_drop_and_put(sk, req);
1692 goto lookup;
1693 }
1694 sk = nsk;
1695 /* reuseport_migrate_sock() has already held one sk_refcnt
1696 * before returning.
1697 */
1698 } else {
1699 sock_hold(sk);
Eric Dumazet4bdc3d62015-10-13 17:12:54 -07001700 }
Eric Dumazet3b24d852016-04-01 08:52:17 -07001701 refcounted = true;
Eric Dumazet1f3b3592017-09-08 12:44:47 -07001702 nsk = NULL;
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001703 if (!tcp_filter(sk, skb)) {
1704 th = (const struct tcphdr *)skb->data;
1705 hdr = ipv6_hdr(skb);
1706 tcp_v6_fill_cb(skb, hdr, th);
Eric Dumazete0f97592018-02-13 06:14:12 -08001707 nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001708 }
Eric Dumazet079096f2015-10-02 11:43:32 -07001709 if (!nsk) {
1710 reqsk_put(req);
Eric Dumazete0f97592018-02-13 06:14:12 -08001711 if (req_stolen) {
1712 /* Another cpu got exclusive access to req
1713 * and created a full blown socket.
1714 * Try to feed this packet to this socket
1715 * instead of discarding it.
1716 */
1717 tcp_v6_restore_cb(skb);
1718 sock_put(sk);
1719 goto lookup;
1720 }
Eric Dumazet77166822016-02-18 05:39:18 -08001721 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001722 }
1723 if (nsk == sk) {
Eric Dumazet079096f2015-10-02 11:43:32 -07001724 reqsk_put(req);
1725 tcp_v6_restore_cb(skb);
1726 } else if (tcp_child_process(sk, nsk, skb)) {
1727 tcp_v6_send_reset(nsk, skb);
Eric Dumazet77166822016-02-18 05:39:18 -08001728 goto discard_and_relse;
Eric Dumazet079096f2015-10-02 11:43:32 -07001729 } else {
Eric Dumazet77166822016-02-18 05:39:18 -08001730 sock_put(sk);
Eric Dumazet079096f2015-10-02 11:43:32 -07001731 return 0;
1732 }
1733 }
Eric Dumazet790eb672021-10-25 09:48:22 -07001734
1735 if (static_branch_unlikely(&ip6_min_hopcount)) {
1736 /* min_hopcount can be changed concurrently from do_ipv6_setsockopt() */
1737 if (hdr->hop_limit < READ_ONCE(tcp_inet6_sk(sk)->min_hopcount)) {
1738 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1739 goto discard_and_relse;
1740 }
Stephen Hemmingere802af92010-04-22 15:24:53 -07001741 }
1742
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
1744 goto discard_and_relse;
1745
David Ahernd14c77e2019-12-30 14:14:26 -08001746 if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001747 goto discard_and_relse;
Dmitry Popov9ea88a12014-08-07 02:38:22 +04001748
Eric Dumazetac6e7802016-11-10 13:12:35 -08001749 if (tcp_filter(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 goto discard_and_relse;
Eric Dumazetac6e7802016-11-10 13:12:35 -08001751 th = (const struct tcphdr *)skb->data;
1752 hdr = ipv6_hdr(skb);
Eric Dumazeteeea10b2017-12-03 09:32:59 -08001753 tcp_v6_fill_cb(skb, hdr, th);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
1755 skb->dev = NULL;
1756
Eric Dumazete994b2f2015-10-02 11:43:39 -07001757 if (sk->sk_state == TCP_LISTEN) {
1758 ret = tcp_v6_do_rcv(sk, skb);
1759 goto put_and_return;
1760 }
1761
1762 sk_incoming_cpu_update(sk);
1763
Eric Dumazetf35f8212021-11-15 11:02:46 -08001764 sk_defer_free_flush(sk);
Fabio Olive Leite293b9c42006-09-25 22:28:47 -07001765 bh_lock_sock_nested(sk);
Martin KaFai Laua44d6ea2016-03-14 10:52:15 -07001766 tcp_segs_in(tcp_sk(sk), skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 ret = 0;
1768 if (!sock_owned_by_user(sk)) {
Florian Westphale7942d02017-07-30 03:57:18 +02001769 ret = tcp_v6_do_rcv(sk, skb);
Eric Dumazet8b27dae2019-03-22 08:56:40 -07001770 } else {
1771 if (tcp_add_backlog(sk, skb))
1772 goto discard_and_relse;
Zhu Yi6b03a532010-03-04 18:01:41 +00001773 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001774 bh_unlock_sock(sk);
Eric Dumazete994b2f2015-10-02 11:43:39 -07001775put_and_return:
Eric Dumazet3b24d852016-04-01 08:52:17 -07001776 if (refcounted)
1777 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778 return ret ? -1 : 0;
1779
1780no_tcp_socket:
1781 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
1782 goto discard_it;
1783
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001784 tcp_v6_fill_cb(skb, hdr, th);
1785
Eric Dumazet12e25e12015-06-03 23:49:21 -07001786 if (tcp_checksum_complete(skb)) {
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001787csum_error:
Jakub Kicinski709c0312021-05-14 13:04:25 -07001788 trace_tcp_bad_csum(skb);
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001789 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790bad_packet:
Eric Dumazet90bbcc62016-04-27 16:44:32 -07001791 __TCP_INC_STATS(net, TCP_MIB_INERRS);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 } else {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001793 tcp_v6_send_reset(NULL, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 }
1795
1796discard_it:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 kfree_skb(skb);
1798 return 0;
1799
1800discard_and_relse:
Eric Dumazet532182c2016-04-01 08:52:19 -07001801 sk_drops_add(sk, skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001802 if (refcounted)
1803 sock_put(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804 goto discard_it;
1805
1806do_time_wait:
1807 if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001808 inet_twsk_put(inet_twsk(sk));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001809 goto discard_it;
1810 }
1811
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001812 tcp_v6_fill_cb(skb, hdr, th);
1813
Eric Dumazet6a5dc9e2013-04-29 08:39:56 +00001814 if (tcp_checksum_complete(skb)) {
1815 inet_twsk_put(inet_twsk(sk));
1816 goto csum_error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001817 }
1818
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -07001819 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820 case TCP_TW_SYN:
1821 {
1822 struct sock *sk2;
1823
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09001824 sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
Craig Galleka5836362016-02-10 11:50:38 -05001825 skb, __tcp_hdrlen(th),
Tom Herbert5ba24952013-01-22 09:50:39 +00001826 &ipv6_hdr(skb)->saddr, th->source,
Arnaldo Carvalho de Melo0660e032007-04-25 17:54:47 -07001827 &ipv6_hdr(skb)->daddr,
David Ahern24b711e2018-07-19 12:41:18 -07001828 ntohs(th->dest),
1829 tcp_v6_iif_l3_slave(skb),
David Ahern4297a0e2017-08-07 08:44:21 -07001830 sdif);
Ian Morris53b24b82015-03-29 14:00:05 +01001831 if (sk2) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07001832 struct inet_timewait_sock *tw = inet_twsk(sk);
Eric Dumazetdbe7faa2015-07-08 14:28:30 -07001833 inet_twsk_deschedule_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834 sk = sk2;
Alexey Kodanev4ad19de2015-03-27 12:24:22 +03001835 tcp_v6_restore_cb(skb);
Eric Dumazet3b24d852016-04-01 08:52:17 -07001836 refcounted = false;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 goto process;
1838 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 }
Gustavo A. R. Silva275757e62017-10-16 16:36:52 -05001840 /* to ACK */
Joe Perchesa8eceea2020-03-12 15:50:22 -07001841 fallthrough;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842 case TCP_TW_ACK:
1843 tcp_v6_timewait_ack(sk, skb);
1844 break;
1845 case TCP_TW_RST:
Florian Westphal271c3b92015-12-21 21:29:26 +01001846 tcp_v6_send_reset(sk, skb);
1847 inet_twsk_deschedule_put(inet_twsk(sk));
1848 goto discard_it;
Wang Yufen4aa956d2014-03-29 09:27:29 +08001849 case TCP_TW_SUCCESS:
1850 ;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851 }
1852 goto discard_it;
1853}
1854
Paolo Abeni97ff7ff2019-05-03 17:01:38 +02001855INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
Eric Dumazetc7109982012-07-26 12:18:11 +00001856{
1857 const struct ipv6hdr *hdr;
1858 const struct tcphdr *th;
1859 struct sock *sk;
1860
1861 if (skb->pkt_type != PACKET_HOST)
1862 return;
1863
1864 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1865 return;
1866
1867 hdr = ipv6_hdr(skb);
1868 th = tcp_hdr(skb);
1869
1870 if (th->doff < sizeof(struct tcphdr) / 4)
1871 return;
1872
Eric Dumazet870c3152014-10-17 09:17:20 -07001873 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001874 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1875 &hdr->saddr, th->source,
1876 &hdr->daddr, ntohs(th->dest),
David Ahern4297a0e2017-08-07 08:44:21 -07001877 inet6_iif(skb), inet6_sdif(skb));
Eric Dumazetc7109982012-07-26 12:18:11 +00001878 if (sk) {
1879 skb->sk = sk;
1880 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001881 if (sk_fullsock(sk)) {
Eric Dumazet8f905c02021-12-20 06:33:30 -08001882 struct dst_entry *dst = rcu_dereference(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001883
Eric Dumazetc7109982012-07-26 12:18:11 +00001884 if (dst)
Eric Dumazetef57c162021-10-25 09:48:17 -07001885 dst = dst_check(dst, sk->sk_rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001886 if (dst &&
Eric Dumazet0c0a5ef2021-10-25 09:48:16 -07001887 sk->sk_rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001888 skb_dst_set_noref(skb, dst);
1889 }
1890 }
1891}
1892
David S. Millerccb7c412010-12-01 18:09:13 -08001893static struct timewait_sock_ops tcp6_timewait_sock_ops = {
1894 .twsk_obj_size = sizeof(struct tcp6_timewait_sock),
1895 .twsk_unique = tcp_twsk_unique,
Wang Yufen4aa956d2014-03-29 09:27:29 +08001896 .twsk_destructor = tcp_twsk_destructor,
David S. Millerccb7c412010-12-01 18:09:13 -08001897};
1898
Eric Dumazetdd2e0b82020-06-19 12:12:35 -07001899INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1900{
Eric Dumazet37354402021-11-15 11:02:32 -08001901 __tcp_v6_send_check(skb, &sk->sk_v6_rcv_saddr, &sk->sk_v6_daddr);
Eric Dumazetdd2e0b82020-06-19 12:12:35 -07001902}
1903
Mat Martineau35b2c322020-01-09 07:59:21 -08001904const struct inet_connection_sock_af_ops ipv6_specific = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001905 .queue_xmit = inet6_csk_xmit,
1906 .send_check = tcp_v6_send_check,
1907 .rebuild_header = inet6_sk_rebuild_header,
Eric Dumazet5d299f32012-08-06 05:09:33 +00001908 .sk_rx_dst_set = inet6_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001909 .conn_request = tcp_v6_conn_request,
1910 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001911 .net_header_len = sizeof(struct ipv6hdr),
Eric Dumazet67469602012-04-24 07:37:38 +00001912 .net_frag_header_len = sizeof(struct frag_hdr),
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001913 .setsockopt = ipv6_setsockopt,
1914 .getsockopt = ipv6_getsockopt,
1915 .addr2sockaddr = inet6_csk_addr2sockaddr,
1916 .sockaddr_len = sizeof(struct sockaddr_in6),
Neal Cardwell4fab9072014-08-14 12:40:05 -04001917 .mtu_reduced = tcp_v6_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918};
1919
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001920#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001921static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001922 .md5_lookup = tcp_v6_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001923 .calc_md5_hash = tcp_v6_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001924 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001925};
David S. Millera9286302006-11-14 19:53:22 -08001926#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001927
Linus Torvalds1da177e2005-04-16 15:20:36 -07001928/*
1929 * TCP over IPv4 via INET6 API
1930 */
Stephen Hemminger3b401a82009-09-01 19:25:04 +00001931static const struct inet_connection_sock_af_ops ipv6_mapped = {
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001932 .queue_xmit = ip_queue_xmit,
1933 .send_check = tcp_v4_send_check,
1934 .rebuild_header = inet_sk_rebuild_header,
Eric Dumazet63d02d12012-08-09 14:11:00 +00001935 .sk_rx_dst_set = inet_sk_rx_dst_set,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001936 .conn_request = tcp_v6_conn_request,
1937 .syn_recv_sock = tcp_v6_syn_recv_sock,
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001938 .net_header_len = sizeof(struct iphdr),
1939 .setsockopt = ipv6_setsockopt,
1940 .getsockopt = ipv6_getsockopt,
1941 .addr2sockaddr = inet6_csk_addr2sockaddr,
1942 .sockaddr_len = sizeof(struct sockaddr_in6),
Neal Cardwell4fab9072014-08-14 12:40:05 -04001943 .mtu_reduced = tcp_v4_mtu_reduced,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944};
1945
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001946#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +00001947static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001948 .md5_lookup = tcp_v4_md5_lookup,
Adam Langley49a72df2008-07-19 00:01:42 -07001949 .calc_md5_hash = tcp_v4_md5_hash_skb,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001950 .md5_parse = tcp_v6_parse_md5_keys,
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001951};
David S. Millera9286302006-11-14 19:53:22 -08001952#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001953
Linus Torvalds1da177e2005-04-16 15:20:36 -07001954/* NOTE: A lot of things set to zero explicitly by call to
1955 * sk_alloc() so need not be done here.
1956 */
1957static int tcp_v6_init_sock(struct sock *sk)
1958{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001959 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001960
Neal Cardwell900f65d2012-04-19 09:55:21 +00001961 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001963 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001965#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001966 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001967#endif
1968
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 return 0;
1970}
1971
Brian Haley7d06b2e2008-06-14 17:04:49 -07001972static void tcp_v6_destroy_sock(struct sock *sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001973{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 tcp_v4_destroy_sock(sk);
Brian Haley7d06b2e2008-06-14 17:04:49 -07001975 inet6_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001976}
1977
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001978#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979/* Proc filesystem TCPv6 sock list dumping. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001980static void get_openreq6(struct seq_file *seq,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001981 const struct request_sock *req, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001982{
Eric Dumazetfa76ce732015-03-19 19:04:20 -07001983 long ttd = req->rsk_timer.expires - jiffies;
Eric Dumazet634fb9792013-10-09 15:21:29 -07001984 const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
1985 const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
1987 if (ttd < 0)
1988 ttd = 0;
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 seq_printf(seq,
1991 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001992 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 i,
1994 src->s6_addr32[0], src->s6_addr32[1],
1995 src->s6_addr32[2], src->s6_addr32[3],
Eric Dumazetb44084c2013-10-10 00:04:37 -07001996 inet_rsk(req)->ir_num,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997 dest->s6_addr32[0], dest->s6_addr32[1],
1998 dest->s6_addr32[2], dest->s6_addr32[3],
Eric Dumazet634fb9792013-10-09 15:21:29 -07001999 ntohs(inet_rsk(req)->ir_rmt_port),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000 TCP_SYN_RECV,
Weilong Chen4c99aa42013-12-19 18:44:34 +08002001 0, 0, /* could print option size, but that is af dependent. */
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002002 1, /* timers active (only the expire timer) */
2003 jiffies_to_clock_t(ttd),
Eric Dumazete6c022a2012-10-27 23:16:46 +00002004 req->num_timeout,
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07002005 from_kuid_munged(seq_user_ns(seq),
2006 sock_i_uid(req->rsk_listener)),
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002007 0, /* non standard timer */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002008 0, /* open_requests have no inode */
2009 0, req);
2010}
2011
2012static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
2013{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00002014 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002015 __u16 destp, srcp;
2016 int timer_active;
2017 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04002018 const struct inet_sock *inet = inet_sk(sp);
2019 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002020 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07002021 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002022 int rx_queue;
2023 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002024
Eric Dumazetefe42082013-10-03 15:42:29 -07002025 dest = &sp->sk_v6_daddr;
2026 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00002027 destp = ntohs(inet->inet_dport);
2028 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002029
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07002030 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08002031 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07002032 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002034 timer_expires = icsk->icsk_timeout;
2035 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002037 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002038 } else if (timer_pending(&sp->sk_timer)) {
2039 timer_active = 2;
2040 timer_expires = sp->sk_timer.expires;
2041 } else {
2042 timer_active = 0;
2043 timer_expires = jiffies;
2044 }
2045
Yafang Shao986ffdf2017-12-20 11:12:52 +08002046 state = inet_sk_state_load(sp);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002047 if (state == TCP_LISTEN)
Eric Dumazet288efe82019-11-05 14:11:53 -08002048 rx_queue = READ_ONCE(sp->sk_ack_backlog);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002049 else
2050 /* Because we don't lock the socket,
2051 * we might find a transient negative value.
2052 */
Eric Dumazetdba7d9b2019-10-10 20:17:39 -07002053 rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
Eric Dumazet7db48e92019-10-10 20:17:40 -07002054 READ_ONCE(tp->copied_seq), 0);
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002055
Linus Torvalds1da177e2005-04-16 15:20:36 -07002056 seq_printf(seq,
2057 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02002058 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 i,
2060 src->s6_addr32[0], src->s6_addr32[1],
2061 src->s6_addr32[2], src->s6_addr32[3], srcp,
2062 dest->s6_addr32[0], dest->s6_addr32[1],
2063 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002064 state,
Eric Dumazet0f317462019-10-10 20:17:41 -07002065 READ_ONCE(tp->write_seq) - tp->snd_una,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002066 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00002068 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002069 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06002070 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002071 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072 sock_i_ino(sp),
Reshetova, Elena41c6d652017-06-30 13:08:01 +03002073 refcount_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07002074 jiffies_to_clock_t(icsk->icsk_rto),
2075 jiffies_to_clock_t(icsk->icsk_ack.ato),
Wei Wang31954cd2019-01-25 10:53:19 -08002076 (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07002077 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08002078 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07002079 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07002080 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 );
2082}
2083
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002084static void get_timewait6_sock(struct seq_file *seq,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07002085 struct inet_timewait_sock *tw, int i)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086{
Eric Dumazet789f5582015-04-12 18:51:09 -07002087 long delta = tw->tw_timer.expires - jiffies;
Eric Dumazetb71d1d42011-04-22 04:53:02 +00002088 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002089 __u16 destp, srcp;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
Eric Dumazetefe42082013-10-03 15:42:29 -07002091 dest = &tw->tw_v6_daddr;
2092 src = &tw->tw_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093 destp = ntohs(tw->tw_dport);
2094 srcp = ntohs(tw->tw_sport);
2095
2096 seq_printf(seq,
2097 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Dan Rosenberg71338aa2011-05-23 12:17:35 +00002098 "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 i,
2100 src->s6_addr32[0], src->s6_addr32[1],
2101 src->s6_addr32[2], src->s6_addr32[3], srcp,
2102 dest->s6_addr32[0], dest->s6_addr32[1],
2103 dest->s6_addr32[2], dest->s6_addr32[3], destp,
2104 tw->tw_substate, 0, 0,
Eric Dumazeta399a802012-08-08 21:13:53 +00002105 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
Reshetova, Elena41c6d652017-06-30 13:08:01 +03002106 refcount_read(&tw->tw_refcnt), tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107}
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109static int tcp6_seq_show(struct seq_file *seq, void *v)
2110{
2111 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002112 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 if (v == SEQ_START_TOKEN) {
2115 seq_puts(seq,
2116 " sl "
2117 "local_address "
2118 "remote_address "
2119 "st tx_queue rx_queue tr tm->when retrnsmt"
2120 " uid timeout inode\n");
2121 goto out;
2122 }
2123 st = seq->private;
2124
Eric Dumazet079096f2015-10-02 11:43:32 -07002125 if (sk->sk_state == TCP_TIME_WAIT)
2126 get_timewait6_sock(seq, v, st->num);
2127 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07002128 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07002129 else
2130 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131out:
2132 return 0;
2133}
2134
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002135static const struct seq_operations tcp6_seq_ops = {
2136 .show = tcp6_seq_show,
2137 .start = tcp_seq_start,
2138 .next = tcp_seq_next,
2139 .stop = tcp_seq_stop,
2140};
2141
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144};
2145
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002146int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147{
Christoph Hellwigc3506372018-04-10 19:42:55 +02002148 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2149 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002150 return -ENOMEM;
2151 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152}
2153
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07002154void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155{
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002156 remove_proc_entry("tcp6", net->proc_net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157}
2158#endif
2159
2160struct proto tcpv6_prot = {
2161 .name = "TCPv6",
2162 .owner = THIS_MODULE,
2163 .close = tcp_close,
Andrey Ignatovd74bad42018-03-30 15:08:05 -07002164 .pre_connect = tcp_v6_pre_connect,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165 .connect = tcp_v6_connect,
2166 .disconnect = tcp_disconnect,
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002167 .accept = inet_csk_accept,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 .ioctl = tcp_ioctl,
2169 .init = tcp_v6_init_sock,
2170 .destroy = tcp_v6_destroy_sock,
2171 .shutdown = tcp_shutdown,
2172 .setsockopt = tcp_setsockopt,
2173 .getsockopt = tcp_getsockopt,
Stanislav Fomichev9cacf812021-01-15 08:34:59 -08002174 .bpf_bypass_getsockopt = tcp_bpf_bypass_getsockopt,
Ursula Braun4b9d07a2017-01-09 16:55:12 +01002175 .keepalive = tcp_set_keepalive,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176 .recvmsg = tcp_recvmsg,
Changli Gao7ba42912010-07-10 20:41:55 +00002177 .sendmsg = tcp_sendmsg,
2178 .sendpage = tcp_sendpage,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 .backlog_rcv = tcp_v6_do_rcv,
Eric Dumazet46d3cea2012-07-11 05:50:31 +00002180 .release_cb = tcp_release_cb,
Craig Gallek496611d2016-02-10 11:50:36 -05002181 .hash = inet6_hash,
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08002182 .unhash = inet_unhash,
2183 .get_port = inet_csk_get_port,
Menglong Dong91a760b2022-01-06 21:20:20 +08002184 .put_port = inet_put_port,
Cong Wang8a59f9d2021-03-30 19:32:31 -07002185#ifdef CONFIG_BPF_SYSCALL
2186 .psock_update_sk_prot = tcp_bpf_update_proto,
2187#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 .enter_memory_pressure = tcp_enter_memory_pressure,
Eric Dumazet06044752017-06-07 13:29:12 -07002189 .leave_memory_pressure = tcp_leave_memory_pressure,
Eric Dumazetc9bee3b72013-07-22 20:27:07 -07002190 .stream_memory_free = tcp_stream_memory_free,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 .sockets_allocated = &tcp_sockets_allocated,
2192 .memory_allocated = &tcp_memory_allocated,
2193 .memory_pressure = &tcp_memory_pressure,
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07002194 .orphan_count = &tcp_orphan_count,
Eric W. Biedermana4fe34b2013-10-19 16:25:36 -07002195 .sysctl_mem = sysctl_tcp_mem,
Eric Dumazet356d1832017-11-07 00:29:28 -08002196 .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem),
2197 .sysctl_rmem_offset = offsetof(struct net, ipv4.sysctl_tcp_rmem),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198 .max_header = MAX_TCP_HEADER,
2199 .obj_size = sizeof(struct tcp6_sock),
Paul E. McKenney5f0d5a32017-01-18 02:53:44 -08002200 .slab_flags = SLAB_TYPESAFE_BY_RCU,
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -08002201 .twsk_prot = &tcp6_timewait_sock_ops,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -07002202 .rsk_prot = &tcp6_request_sock_ops,
Pavel Emelyanov39d8cda2008-03-22 16:50:58 -07002203 .h.hashinfo = &tcp_hashinfo,
Changli Gao7ba42912010-07-10 20:41:55 +00002204 .no_autobind = true,
Lorenzo Colittic1e64e22015-12-16 12:30:05 +09002205 .diag_destroy = tcp_abort,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206};
Vinay Kumar Yadav6abde0b2020-06-02 00:07:05 +05302207EXPORT_SYMBOL_GPL(tcpv6_prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
David Aherna8e3bb32017-08-28 15:14:20 -07002209/* thinking of making this const? Don't.
2210 * early_demux can change based on sysctl.
2211 */
Julia Lawall39294c32017-08-01 18:27:28 +02002212static struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00002213 .early_demux = tcp_v6_early_demux,
subashab@codeaurora.orgdddb64b2017-03-23 13:34:16 -06002214 .early_demux_handler = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 .handler = tcp_v6_rcv,
2216 .err_handler = tcp_v6_err,
2217 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
2218};
2219
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220static struct inet_protosw tcpv6_protosw = {
2221 .type = SOCK_STREAM,
2222 .protocol = IPPROTO_TCP,
2223 .prot = &tcpv6_prot,
2224 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002225 .flags = INET_PROTOSW_PERMANENT |
2226 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227};
2228
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002229static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002230{
Denis V. Lunev56772422008-04-03 14:28:30 -07002231 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
2232 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002233}
2234
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002235static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002236{
Denis V. Lunev56772422008-04-03 14:28:30 -07002237 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002238}
2239
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002240static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002241{
Haishuang Yan1946e672016-12-28 17:52:32 +08002242 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002243}
2244
2245static struct pernet_operations tcpv6_net_ops = {
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00002246 .init = tcpv6_net_init,
2247 .exit = tcpv6_net_exit,
2248 .exit_batch = tcpv6_net_exit_batch,
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002249};
2250
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002251int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002253 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08002254
Vlad Yasevich33362882012-11-15 08:49:15 +00002255 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
2256 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00002257 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00002258
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002259 /* register inet6 protocol */
2260 ret = inet6_register_protosw(&tcpv6_protosw);
2261 if (ret)
2262 goto out_tcpv6_protocol;
2263
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002264 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002265 if (ret)
2266 goto out_tcpv6_protosw;
Mat Martineauf870fa02020-01-21 16:56:15 -08002267
2268 ret = mptcpv6_init();
2269 if (ret)
2270 goto out_tcpv6_pernet_subsys;
2271
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002272out:
2273 return ret;
2274
Mat Martineauf870fa02020-01-21 16:56:15 -08002275out_tcpv6_pernet_subsys:
2276 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002277out_tcpv6_protosw:
2278 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00002279out_tcpv6_protocol:
2280 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002281 goto out;
2282}
2283
Daniel Lezcano09f77092007-12-13 05:34:58 -08002284void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002285{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08002286 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08002287 inet6_unregister_protosw(&tcpv6_protosw);
2288 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289}