blob: 323989927a0a6a2274bcbc1cd0ac72e9d49b24ad [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07005 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09009 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -070010 * linux/net/ipv4/tcp.c
11 * linux/net/ipv4/tcp_input.c
12 * linux/net/ipv4/tcp_output.c
13 *
14 * Fixes:
15 * Hideaki YOSHIFUJI : sin6_scope_id support
16 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
17 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
18 * a single port at the same time.
19 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 */
21
Herbert Xueb4dea52008-12-29 23:04:08 -080022#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070024#include <linux/errno.h>
25#include <linux/types.h>
26#include <linux/socket.h>
27#include <linux/sockios.h>
28#include <linux/net.h>
29#include <linux/jiffies.h>
30#include <linux/in.h>
31#include <linux/in6.h>
32#include <linux/netdevice.h>
33#include <linux/init.h>
34#include <linux/jhash.h>
35#include <linux/ipsec.h>
36#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090037#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080038#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/ipv6.h>
40#include <linux/icmpv6.h>
41#include <linux/random.h>
Paolo Abeni0e219ae2019-05-03 17:01:37 +020042#include <linux/indirect_call_wrapper.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
44#include <net/tcp.h>
45#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030046#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080047#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070048#include <net/ipv6.h>
49#include <net/transp_v6.h>
50#include <net/addrconf.h>
51#include <net/ip6_route.h>
52#include <net/ip6_checksum.h>
53#include <net/inet_ecn.h>
54#include <net/protocol.h>
55#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070056#include <net/snmp.h>
57#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080058#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070059#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070060#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030061#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Linus Torvalds1da177e2005-04-16 15:20:36 -070063#include <linux/proc_fs.h>
64#include <linux/seq_file.h>
65
Herbert Xucf80e0e2016-01-24 21:20:23 +080066#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080067#include <linux/scatterlist.h>
68
Song Liuc24b14c2017-10-23 09:20:24 -070069#include <trace/events/tcp.h>
70
Eric Dumazeta00e7442015-09-29 07:42:39 -070071static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
72static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070073 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074
75static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070076
Stephen Hemminger3b401a82009-09-01 19:25:04 +000077static const struct inet_connection_sock_af_ops ipv6_mapped;
Mat Martineau35b2c322020-01-09 07:59:21 -080078const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080079#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +000080static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
81static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090082#else
Eric Dumazet51723932015-09-29 21:24:05 -070083static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
David Aherndea53bb2019-12-30 14:14:28 -080084 const struct in6_addr *addr,
85 int l3index)
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090086{
87 return NULL;
88}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
Eric Dumazet93a77c12019-03-19 07:01:08 -070091/* Helper returning the inet6 address from a given tcp socket.
92 * It can be used in TCP stack instead of inet6_sk(sk).
93 * This avoids a dereference and allow compiler optimizations.
Eric Dumazetf5d54762019-04-01 03:09:20 -070094 * It is a specialized version of inet6_sk_generic().
Eric Dumazet93a77c12019-03-19 07:01:08 -070095 */
96static struct ipv6_pinfo *tcp_inet6_sk(const struct sock *sk)
97{
Eric Dumazetf5d54762019-04-01 03:09:20 -070098 unsigned int offset = sizeof(struct tcp6_sock) - sizeof(struct ipv6_pinfo);
Eric Dumazet93a77c12019-03-19 07:01:08 -070099
Eric Dumazetf5d54762019-04-01 03:09:20 -0700100 return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700101}
102
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000103static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
104{
105 struct dst_entry *dst = skb_dst(skb);
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000106
Eric Dumazet5037e9e2015-12-14 14:08:53 -0800107 if (dst && dst_hold_safe(dst)) {
Eric Dumazetca777ef2014-09-08 08:06:07 -0700108 const struct rt6_info *rt = (const struct rt6_info *)dst;
109
Eric Dumazetca777ef2014-09-08 08:06:07 -0700110 sk->sk_rx_dst = dst;
111 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
Eric Dumazet93a77c12019-03-19 07:01:08 -0700112 tcp_inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
Eric Dumazetca777ef2014-09-08 08:06:07 -0700113 }
Neal Cardwellfae6ef82012-08-19 03:30:38 +0000114}
115
Eric Dumazet84b114b2017-05-05 06:56:54 -0700116static u32 tcp_v6_init_seq(const struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700117{
Eric Dumazet84b114b2017-05-05 06:56:54 -0700118 return secure_tcpv6_seq(ipv6_hdr(skb)->daddr.s6_addr32,
119 ipv6_hdr(skb)->saddr.s6_addr32,
120 tcp_hdr(skb)->dest,
121 tcp_hdr(skb)->source);
122}
123
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700124static u32 tcp_v6_init_ts_off(const struct net *net, const struct sk_buff *skb)
Eric Dumazet84b114b2017-05-05 06:56:54 -0700125{
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700126 return secure_tcpv6_ts_off(net, ipv6_hdr(skb)->daddr.s6_addr32,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700127 ipv6_hdr(skb)->saddr.s6_addr32);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700128}
129
Andrey Ignatovd74bad42018-03-30 15:08:05 -0700130static int tcp_v6_pre_connect(struct sock *sk, struct sockaddr *uaddr,
131 int addr_len)
132{
133 /* This check is replicated from tcp_v6_connect() and intended to
134 * prevent BPF program called below from accessing bytes that are out
135 * of the bound specified by user in addr_len.
136 */
137 if (addr_len < SIN6_LEN_RFC2133)
138 return -EINVAL;
139
140 sock_owned_by_me(sk);
141
142 return BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr);
143}
144
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900145static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700146 int addr_len)
147{
148 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900149 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800150 struct inet_connection_sock *icsk = inet_csk(sk);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700151 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700152 struct tcp_sock *tp = tcp_sk(sk);
Arnaud Ebalard20c59de2010-06-01 21:35:01 +0000153 struct in6_addr *saddr = NULL, *final_p, final;
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800154 struct ipv6_txoptions *opt;
David S. Miller4c9483b2011-03-12 16:22:43 -0500155 struct flowi6 fl6;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 struct dst_entry *dst;
157 int addr_type;
158 int err;
Haishuang Yan1946e672016-12-28 17:52:32 +0800159 struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700160
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900161 if (addr_len < SIN6_LEN_RFC2133)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 return -EINVAL;
163
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900164 if (usin->sin6_family != AF_INET6)
Eric Dumazeta02cec22010-09-22 20:43:57 +0000165 return -EAFNOSUPPORT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700166
David S. Miller4c9483b2011-03-12 16:22:43 -0500167 memset(&fl6, 0, sizeof(fl6));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700168
169 if (np->sndflow) {
David S. Miller4c9483b2011-03-12 16:22:43 -0500170 fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
171 IP6_ECN_flow_init(fl6.flowlabel);
172 if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173 struct ip6_flowlabel *flowlabel;
David S. Miller4c9483b2011-03-12 16:22:43 -0500174 flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
Willem de Bruijn59c820b2019-07-07 05:34:45 -0400175 if (IS_ERR(flowlabel))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 fl6_sock_release(flowlabel);
178 }
179 }
180
181 /*
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900182 * connect() to INADDR_ANY means loopback (BSD'ism).
183 */
184
Jonathan T. Leighton052d2362017-02-12 17:26:07 -0500185 if (ipv6_addr_any(&usin->sin6_addr)) {
186 if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
187 ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
188 &usin->sin6_addr);
189 else
190 usin->sin6_addr = in6addr_loopback;
191 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192
193 addr_type = ipv6_addr_type(&usin->sin6_addr);
194
Weilong Chen4c99aa42013-12-19 18:44:34 +0800195 if (addr_type & IPV6_ADDR_MULTICAST)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196 return -ENETUNREACH;
197
198 if (addr_type&IPV6_ADDR_LINKLOCAL) {
199 if (addr_len >= sizeof(struct sockaddr_in6) &&
200 usin->sin6_scope_id) {
201 /* If interface is set while binding, indices
202 * must coincide.
203 */
David Ahern54dc3e32018-01-04 14:03:54 -0800204 if (!sk_dev_equal_l3scope(sk, usin->sin6_scope_id))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 return -EINVAL;
206
207 sk->sk_bound_dev_if = usin->sin6_scope_id;
208 }
209
210 /* Connect to link-local address requires an interface */
211 if (!sk->sk_bound_dev_if)
212 return -EINVAL;
213 }
214
215 if (tp->rx_opt.ts_recent_stamp &&
Eric Dumazetefe42082013-10-03 15:42:29 -0700216 !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217 tp->rx_opt.ts_recent = 0;
218 tp->rx_opt.ts_recent_stamp = 0;
Eric Dumazet0f317462019-10-10 20:17:41 -0700219 WRITE_ONCE(tp->write_seq, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700220 }
221
Eric Dumazetefe42082013-10-03 15:42:29 -0700222 sk->sk_v6_daddr = usin->sin6_addr;
David S. Miller4c9483b2011-03-12 16:22:43 -0500223 np->flow_label = fl6.flowlabel;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224
225 /*
226 * TCP over IPv4
227 */
228
Jonathan T. Leighton052d2362017-02-12 17:26:07 -0500229 if (addr_type & IPV6_ADDR_MAPPED) {
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800230 u32 exthdrlen = icsk->icsk_ext_hdr_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231 struct sockaddr_in sin;
232
Linus Torvalds1da177e2005-04-16 15:20:36 -0700233 if (__ipv6_only_sock(sk))
234 return -ENETUNREACH;
235
236 sin.sin_family = AF_INET;
237 sin.sin_port = usin->sin6_port;
238 sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
239
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800240 icsk->icsk_af_ops = &ipv6_mapped;
Peter Krystadcec37a62020-01-21 16:56:18 -0800241 if (sk_is_mptcp(sk))
Geert Uytterhoeven31484d52020-01-30 10:45:26 +0100242 mptcpv6_handle_mapped(sk, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700243 sk->sk_backlog_rcv = tcp_v4_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800244#ifdef CONFIG_TCP_MD5SIG
245 tp->af_specific = &tcp_sock_ipv6_mapped_specific;
246#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247
248 err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
249
250 if (err) {
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800251 icsk->icsk_ext_hdr_len = exthdrlen;
252 icsk->icsk_af_ops = &ipv6_specific;
Peter Krystadcec37a62020-01-21 16:56:18 -0800253 if (sk_is_mptcp(sk))
Geert Uytterhoeven31484d52020-01-30 10:45:26 +0100254 mptcpv6_handle_mapped(sk, false);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700255 sk->sk_backlog_rcv = tcp_v6_do_rcv;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800256#ifdef CONFIG_TCP_MD5SIG
257 tp->af_specific = &tcp_sock_ipv6_specific;
258#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700260 }
Eric Dumazetd1e559d2015-03-18 14:05:35 -0700261 np->saddr = sk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700262
263 return err;
264 }
265
Eric Dumazetefe42082013-10-03 15:42:29 -0700266 if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
267 saddr = &sk->sk_v6_rcv_saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700268
David S. Miller4c9483b2011-03-12 16:22:43 -0500269 fl6.flowi6_proto = IPPROTO_TCP;
Eric Dumazetefe42082013-10-03 15:42:29 -0700270 fl6.daddr = sk->sk_v6_daddr;
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000271 fl6.saddr = saddr ? *saddr : np->saddr;
David S. Miller4c9483b2011-03-12 16:22:43 -0500272 fl6.flowi6_oif = sk->sk_bound_dev_if;
273 fl6.flowi6_mark = sk->sk_mark;
David S. Miller1958b852011-03-12 16:36:19 -0500274 fl6.fl6_dport = usin->sin6_port;
275 fl6.fl6_sport = inet->inet_sport;
Lorenzo Colittie2d118a2016-11-04 02:23:43 +0900276 fl6.flowi6_uid = sk->sk_uid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700277
Hannes Frederic Sowa1e1d04e2016-04-05 17:10:15 +0200278 opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800279 final_p = fl6_update_dst(&fl6, opt, &final);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280
Paul Moore3df98d72020-09-27 22:38:26 -0400281 security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6));
Venkat Yekkiralabeb8d132006-08-04 23:12:42 -0700282
Sabrina Dubrocac4e85f72019-12-04 15:35:52 +0100283 dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p);
David S. Miller68d0c6d2011-03-01 13:19:07 -0800284 if (IS_ERR(dst)) {
285 err = PTR_ERR(dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286 goto failure;
David S. Miller14e50e52007-05-24 18:17:54 -0700287 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700288
Ian Morris63159f22015-03-29 14:00:04 +0100289 if (!saddr) {
David S. Miller4c9483b2011-03-12 16:22:43 -0500290 saddr = &fl6.saddr;
Eric Dumazetefe42082013-10-03 15:42:29 -0700291 sk->sk_v6_rcv_saddr = *saddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292 }
293
294 /* set the source address */
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000295 np->saddr = *saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000296 inet->inet_rcv_saddr = LOOPBACK4_IPV6;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297
Herbert Xuf83ef8c2006-06-30 13:37:03 -0700298 sk->sk_gso_type = SKB_GSO_TCPV6;
Eric Dumazet6bd4f352015-12-02 21:53:57 -0800299 ip6_dst_store(sk, dst, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -0800301 icsk->icsk_ext_hdr_len = 0;
Eric Dumazet45f6fad2015-11-29 19:37:57 -0800302 if (opt)
303 icsk->icsk_ext_hdr_len = opt->opt_flen +
304 opt->opt_nflen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700305
306 tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
307
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000308 inet->inet_dport = usin->sin6_port;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309
310 tcp_set_state(sk, TCP_SYN_SENT);
Haishuang Yan1946e672016-12-28 17:52:32 +0800311 err = inet6_hash_connect(tcp_death_row, sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312 if (err)
313 goto late_failure;
314
Tom Herbert877d1f62015-07-28 16:02:05 -0700315 sk_set_txhash(sk);
Sathya Perla9e7ceb02014-10-22 21:42:01 +0530316
Alexey Kodanev00355fa2017-02-22 13:23:55 +0300317 if (likely(!tp->repair)) {
Alexey Kodanev00355fa2017-02-22 13:23:55 +0300318 if (!tp->write_seq)
Eric Dumazet0f317462019-10-10 20:17:41 -0700319 WRITE_ONCE(tp->write_seq,
320 secure_tcpv6_seq(np->saddr.s6_addr32,
321 sk->sk_v6_daddr.s6_addr32,
322 inet->inet_sport,
323 inet->inet_dport));
Eric Dumazet5d2ed052017-06-07 10:34:39 -0700324 tp->tsoffset = secure_tcpv6_ts_off(sock_net(sk),
325 np->saddr.s6_addr32,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700326 sk->sk_v6_daddr.s6_addr32);
Alexey Kodanev00355fa2017-02-22 13:23:55 +0300327 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700328
Wei Wang19f6d3f2017-01-23 10:59:22 -0800329 if (tcp_fastopen_defer_connect(sk, &err))
330 return err;
331 if (err)
332 goto late_failure;
333
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334 err = tcp_connect(sk);
335 if (err)
336 goto late_failure;
337
338 return 0;
339
340late_failure:
341 tcp_set_state(sk, TCP_CLOSE);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342failure:
Eric Dumazetc720c7e82009-10-15 06:30:45 +0000343 inet->inet_dport = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344 sk->sk_route_caps = 0;
345 return err;
346}
347
Eric Dumazet563d34d2012-07-23 09:48:52 +0200348static void tcp_v6_mtu_reduced(struct sock *sk)
349{
350 struct dst_entry *dst;
Eric Dumazetc7bb4b82021-07-08 00:21:09 -0700351 u32 mtu;
Eric Dumazet563d34d2012-07-23 09:48:52 +0200352
353 if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
354 return;
355
Eric Dumazetc7bb4b82021-07-08 00:21:09 -0700356 mtu = READ_ONCE(tcp_sk(sk)->mtu_info);
357
358 /* Drop requests trying to increase our current mss.
359 * Check done in __ip6_rt_update_pmtu() is too late.
360 */
361 if (tcp_mtu_to_mss(sk, mtu) >= tcp_sk(sk)->mss_cache)
362 return;
363
364 dst = inet6_csk_update_pmtu(sk, mtu);
Eric Dumazet563d34d2012-07-23 09:48:52 +0200365 if (!dst)
366 return;
367
368 if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
369 tcp_sync_mss(sk, dst_mtu(dst));
370 tcp_simple_retransmit(sk);
371 }
372}
373
Stefano Brivio32bbd872018-11-08 12:19:21 +0100374static int tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
Brian Haleyd5fdd6b2009-06-23 04:31:07 -0700375 u8 type, u8 code, int offset, __be32 info)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700376{
Weilong Chen4c99aa42013-12-19 18:44:34 +0800377 const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
Arnaldo Carvalho de Melo505cbfc2005-08-12 09:19:38 -0300378 const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
Eric Dumazet22150892015-03-22 10:22:23 -0700379 struct net *net = dev_net(skb->dev);
380 struct request_sock *fastopen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700381 struct ipv6_pinfo *np;
Eric Dumazet22150892015-03-22 10:22:23 -0700382 struct tcp_sock *tp;
383 __u32 seq, snd_una;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384 struct sock *sk;
Eric Dumazet9cf74902016-02-02 19:31:12 -0800385 bool fatal;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700387
Eric Dumazet22150892015-03-22 10:22:23 -0700388 sk = __inet6_lookup_established(net, &tcp_hashinfo,
389 &hdr->daddr, th->dest,
390 &hdr->saddr, ntohs(th->source),
David Ahern4297a0e2017-08-07 08:44:21 -0700391 skb->dev->ifindex, inet6_sdif(skb));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392
Eric Dumazet22150892015-03-22 10:22:23 -0700393 if (!sk) {
Eric Dumazeta16292a2016-04-27 16:44:36 -0700394 __ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
395 ICMP6_MIB_INERRORS);
Stefano Brivio32bbd872018-11-08 12:19:21 +0100396 return -ENOENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 }
398
399 if (sk->sk_state == TCP_TIME_WAIT) {
YOSHIFUJI Hideaki9469c7b2006-10-10 19:41:46 -0700400 inet_twsk_put(inet_twsk(sk));
Stefano Brivio32bbd872018-11-08 12:19:21 +0100401 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402 }
Eric Dumazet22150892015-03-22 10:22:23 -0700403 seq = ntohl(th->seq);
Eric Dumazet9cf74902016-02-02 19:31:12 -0800404 fatal = icmpv6_err_convert(type, code, &err);
Stefano Brivio32bbd872018-11-08 12:19:21 +0100405 if (sk->sk_state == TCP_NEW_SYN_RECV) {
406 tcp_req_err(sk, seq, fatal);
407 return 0;
408 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409
410 bh_lock_sock(sk);
Eric Dumazet563d34d2012-07-23 09:48:52 +0200411 if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700412 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700413
414 if (sk->sk_state == TCP_CLOSE)
415 goto out;
416
Eric Dumazet93a77c12019-03-19 07:01:08 -0700417 if (ipv6_hdr(skb)->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700418 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
Stephen Hemmingere802af92010-04-22 15:24:53 -0700419 goto out;
420 }
421
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 tp = tcp_sk(sk);
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700423 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
Eric Dumazetd983ea62019-10-10 20:17:38 -0700424 fastopen = rcu_dereference(tp->fastopen_rsk);
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700425 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (sk->sk_state != TCP_LISTEN &&
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700427 !between(seq, snd_una, tp->snd_nxt)) {
Eric Dumazet02a1d6e2016-04-27 16:44:39 -0700428 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 goto out;
430 }
431
Eric Dumazet93a77c12019-03-19 07:01:08 -0700432 np = tcp_inet6_sk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433
David S. Millerec18d9a2012-07-12 00:25:15 -0700434 if (type == NDISC_REDIRECT) {
Jon Maxwell45caeaa2017-03-10 16:40:33 +1100435 if (!sock_owned_by_user(sk)) {
436 struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);
David S. Millerec18d9a2012-07-12 00:25:15 -0700437
Jon Maxwell45caeaa2017-03-10 16:40:33 +1100438 if (dst)
439 dst->ops->redirect(dst, sk, skb);
440 }
Christoph Paasch50a75a82013-04-07 04:53:15 +0000441 goto out;
David S. Millerec18d9a2012-07-12 00:25:15 -0700442 }
443
Linus Torvalds1da177e2005-04-16 15:20:36 -0700444 if (type == ICMPV6_PKT_TOOBIG) {
Eric Dumazetc7bb4b82021-07-08 00:21:09 -0700445 u32 mtu = ntohl(info);
446
Eric Dumazet0d4f0602013-03-18 07:01:28 +0000447 /* We are not interested in TCP_LISTEN and open_requests
448 * (SYN-ACKs send out by Linux are always <576bytes so
449 * they should go through unfragmented).
450 */
451 if (sk->sk_state == TCP_LISTEN)
452 goto out;
453
Hannes Frederic Sowa93b36cf2013-12-15 03:41:14 +0100454 if (!ip6_sk_accept_pmtu(sk))
455 goto out;
456
Eric Dumazetc7bb4b82021-07-08 00:21:09 -0700457 if (mtu < IPV6_MIN_MTU)
458 goto out;
459
460 WRITE_ONCE(tp->mtu_info, mtu);
461
Eric Dumazet563d34d2012-07-23 09:48:52 +0200462 if (!sock_owned_by_user(sk))
463 tcp_v6_mtu_reduced(sk);
Julian Anastasovd013ef2a2012-09-05 10:53:18 +0000464 else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
Eric Dumazet7aa54702016-12-03 11:14:57 -0800465 &sk->sk_tsq_flags))
Julian Anastasovd013ef2a2012-09-05 10:53:18 +0000466 sock_hold(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700467 goto out;
468 }
469
Linus Torvalds1da177e2005-04-16 15:20:36 -0700470
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700471 /* Might be for an request_sock */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700472 switch (sk->sk_state) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 case TCP_SYN_SENT:
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700474 case TCP_SYN_RECV:
475 /* Only in fast or simultaneous open. If a fast open socket is
Randy Dunlap634a63e2020-09-17 21:35:17 -0700476 * already accepted it is treated as a connected one below.
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700477 */
Ian Morris63159f22015-03-29 14:00:04 +0100478 if (fastopen && !fastopen->sk)
Yuchung Cheng0a672f72014-05-11 20:22:12 -0700479 break;
480
Eric Dumazet45af29c2020-05-24 11:00:02 -0700481 ipv6_icmp_error(sk, skb, err, th->dest, ntohl(info), (u8 *)th);
482
Linus Torvalds1da177e2005-04-16 15:20:36 -0700483 if (!sock_owned_by_user(sk)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700484 sk->sk_err = err;
Alexander Aringe3ae2362021-06-27 18:48:21 -0400485 sk_error_report(sk); /* Wake people up to see the error (see connect in sock.c) */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700486
487 tcp_done(sk);
488 } else
489 sk->sk_err_soft = err;
490 goto out;
Eric Dumazetd2924562020-05-27 17:34:58 -0700491 case TCP_LISTEN:
492 break;
493 default:
494 /* check if this ICMP message allows revert of backoff.
495 * (see RFC 6069)
496 */
497 if (!fastopen && type == ICMPV6_DEST_UNREACH &&
498 code == ICMPV6_NOROUTE)
499 tcp_ld_RTO_revert(sk, seq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 }
501
502 if (!sock_owned_by_user(sk) && np->recverr) {
503 sk->sk_err = err;
Alexander Aringe3ae2362021-06-27 18:48:21 -0400504 sk_error_report(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700505 } else
506 sk->sk_err_soft = err;
507
508out:
509 bh_unlock_sock(sk);
510 sock_put(sk);
Stefano Brivio32bbd872018-11-08 12:19:21 +0100511 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700512}
513
514
Eric Dumazet0f935db2015-09-25 07:39:21 -0700515static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
Octavian Purdilad6274bd2014-06-25 17:09:58 +0300516 struct flowi *fl,
Neal Cardwell3840a062012-06-28 12:34:19 +0000517 struct request_sock *req,
Eric Dumazetca6fb062015-10-02 11:43:35 -0700518 struct tcp_fastopen_cookie *foc,
Martin KaFai Lau331fca42020-08-20 12:00:52 -0700519 enum tcp_synack_type synack_type,
520 struct sk_buff *syn_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521{
Eric Dumazet634fb9792013-10-09 15:21:29 -0700522 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700523 struct ipv6_pinfo *np = tcp_inet6_sk(sk);
Huw Davies56ac42b2016-06-27 15:05:28 -0400524 struct ipv6_txoptions *opt;
Octavian Purdilad6274bd2014-06-25 17:09:58 +0300525 struct flowi6 *fl6 = &fl->u.ip6;
Weilong Chen4c99aa42013-12-19 18:44:34 +0800526 struct sk_buff *skb;
Neal Cardwell94942182012-06-28 12:34:20 +0000527 int err = -ENOMEM;
Wei Wangac8f1712020-09-09 17:50:48 -0700528 u8 tclass;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529
Neal Cardwell9f10d3f2012-06-28 12:34:21 +0000530 /* First, grab a route. */
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700531 if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
532 IPPROTO_TCP)) == NULL)
Denis V. Lunevfd80eb92008-02-29 11:43:03 -0800533 goto done;
Neal Cardwell94942182012-06-28 12:34:20 +0000534
Martin KaFai Lau331fca42020-08-20 12:00:52 -0700535 skb = tcp_make_synack(sk, dst, req, foc, synack_type, syn_skb);
Neal Cardwell94942182012-06-28 12:34:20 +0000536
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 if (skb) {
Eric Dumazet634fb9792013-10-09 15:21:29 -0700538 __tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
539 &ireq->ir_v6_rmt_addr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700540
Eric Dumazet634fb9792013-10-09 15:21:29 -0700541 fl6->daddr = ireq->ir_v6_rmt_addr;
Ian Morris53b24b82015-03-29 14:00:05 +0100542 if (np->repflow && ireq->pktopts)
Florent Fourcotdf3687f2014-01-17 17:15:03 +0100543 fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));
544
Wei Wangac8f1712020-09-09 17:50:48 -0700545 tclass = sock_net(sk)->ipv4.sysctl_tcp_reflect_tos ?
Wei Wang8ef44b62020-12-08 09:55:08 -0800546 (tcp_rsk(req)->syn_tos & ~INET_ECN_MASK) |
547 (np->tclass & INET_ECN_MASK) :
Alexander Duyck861602b2020-11-19 13:23:51 -0800548 np->tclass;
Alexander Duyck407c85c2020-11-20 19:47:44 -0800549
550 if (!INET_ECN_is_capable(tclass) &&
551 tcp_bpf_ca_needs_ecn((struct sock *)req))
552 tclass |= INET_ECN_ECT_0;
553
554 rcu_read_lock();
555 opt = ireq->ipv6_opt;
Huw Davies56ac42b2016-06-27 15:05:28 -0400556 if (!opt)
557 opt = rcu_dereference(np->opt);
Wei Wangac8f1712020-09-09 17:50:48 -0700558 err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt,
Alexander Duyck861602b2020-11-19 13:23:51 -0800559 tclass, sk->sk_priority);
Eric Dumazet3e4006f2016-01-08 09:35:51 -0800560 rcu_read_unlock();
Gerrit Renkerb9df3cb2006-11-14 11:21:36 -0200561 err = net_xmit_eval(err);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 }
563
564done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 return err;
566}
567
Octavian Purdila72659ec2010-01-17 19:09:39 -0800568
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700569static void tcp_v6_reqsk_destructor(struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570{
Huw Davies56ac42b2016-06-27 15:05:28 -0400571 kfree(inet_rsk(req)->ipv6_opt);
Eric Dumazet634fb9792013-10-09 15:21:29 -0700572 kfree_skb(inet_rsk(req)->pktopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700573}
574
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800575#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700576static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
David Aherndea53bb2019-12-30 14:14:28 -0800577 const struct in6_addr *addr,
578 int l3index)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800579{
David Aherndea53bb2019-12-30 14:14:28 -0800580 return tcp_md5_do_lookup(sk, l3index,
581 (union tcp_md5_addr *)addr, AF_INET6);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800582}
583
Eric Dumazetb83e3de2015-09-25 07:39:15 -0700584static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700585 const struct sock *addr_sk)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800586{
David Aherndea53bb2019-12-30 14:14:28 -0800587 int l3index;
588
589 l3index = l3mdev_master_ifindex_by_index(sock_net(sk),
590 addr_sk->sk_bound_dev_if);
591 return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr,
592 l3index);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800593}
594
/* setsockopt(TCP_MD5SIG / TCP_MD5SIG_EXT) handler for IPv6 sockets.
 * Copies a struct tcp_md5sig from user space, validates it, and then
 * either deletes (tcpm_keylen == 0) or adds/updates the MD5 key for the
 * given peer address.  IPv4-mapped IPv6 addresses are stored as AF_INET
 * keys using the low 32 bits of the address.
 * Returns 0 on success or a negative errno (-EINVAL, -EFAULT, ...).
 */
Ivan Delalande8917a772017-06-15 18:07:07 -0700595static int tcp_v6_parse_md5_keys(struct sock *sk, int optname,
Christoph Hellwigd4c19c42020-07-23 08:09:05 +0200596 sockptr_t optval, int optlen)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800597{
598 struct tcp_md5sig cmd;
599 struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;
David Aherndea53bb2019-12-30 14:14:28 -0800600 int l3index = 0;
Ivan Delalande8917a772017-06-15 18:07:07 -0700601 u8 prefixlen;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800602
603 if (optlen < sizeof(cmd))
604 return -EINVAL;
605
Christoph Hellwigd4c19c42020-07-23 08:09:05 +0200606 if (copy_from_sockptr(&cmd, optval, sizeof(cmd)))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800607 return -EFAULT;
608
609 if (sin6->sin6_family != AF_INET6)
610 return -EINVAL;
611
	/* TCP_MD5SIG_EXT may carry an address prefix length; otherwise the
	 * key matches the full address (32 bits for v4-mapped, else 128).
	 */
Ivan Delalande8917a772017-06-15 18:07:07 -0700612 if (optname == TCP_MD5SIG_EXT &&
613 cmd.tcpm_flags & TCP_MD5SIG_FLAG_PREFIX) {
614 prefixlen = cmd.tcpm_prefixlen;
615 if (prefixlen > 128 || (ipv6_addr_v4mapped(&sin6->sin6_addr) &&
616 prefixlen > 32))
617 return -EINVAL;
618 } else {
619 prefixlen = ipv6_addr_v4mapped(&sin6->sin6_addr) ? 32 : 128;
620 }
621
	/* TCP_MD5SIG_FLAG_IFINDEX scopes the key to an L3 master (VRF)
	 * device; any other kind of ifindex is rejected below.
	 */
David Ahern6b102db2019-12-30 14:14:29 -0800622 if (optname == TCP_MD5SIG_EXT &&
623 cmd.tcpm_flags & TCP_MD5SIG_FLAG_IFINDEX) {
624 struct net_device *dev;
625
626 rcu_read_lock();
627 dev = dev_get_by_index_rcu(sock_net(sk), cmd.tcpm_ifindex);
628 if (dev && netif_is_l3_master(dev))
629 l3index = dev->ifindex;
630 rcu_read_unlock();
631
632 /* ok to reference set/not set outside of rcu;
633 * right now device MUST be an L3 master
634 */
635 if (!dev || !l3index)
636 return -EINVAL;
637 }
638
	/* Zero key length means "delete the key for this address". */
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800639 if (!cmd.tcpm_keylen) {
Brian Haleye773e4f2007-08-24 23:16:08 -0700640 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000641 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
David Ahern6b102db2019-12-30 14:14:29 -0800642 AF_INET, prefixlen,
643 l3index);
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000644 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
David Aherndea53bb2019-12-30 14:14:28 -0800645 AF_INET6, prefixlen, l3index);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800646 }
647
648 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
649 return -EINVAL;
650
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000651 if (ipv6_addr_v4mapped(&sin6->sin6_addr))
652 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
David Aherndea53bb2019-12-30 14:14:28 -0800653 AF_INET, prefixlen, l3index,
654 cmd.tcpm_key, cmd.tcpm_keylen,
655 GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800656
Eric Dumazeta915da9b2012-01-31 05:18:33 +0000657 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
David Aherndea53bb2019-12-30 14:14:28 -0800658 AF_INET6, prefixlen, l3index,
659 cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800660}
661
/* Fold the TCP-MD5 header material into the hash state of @hp:
 * the IPv6 pseudo-header (RFC 2460: saddr, daddr, proto, length) followed
 * by a copy of the TCP header with its checksum field zeroed.  @nbytes is
 * the segment length recorded in the pseudo-header.
 * Returns the crypto_ahash_update() result (0 on success).
 */
Eric Dumazet19689e32016-06-27 18:51:53 +0200662static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
663 const struct in6_addr *daddr,
664 const struct in6_addr *saddr,
665 const struct tcphdr *th, int nbytes)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800666{
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800667 struct tcp6_pseudohdr *bp;
Adam Langley49a72df2008-07-19 00:01:42 -0700668 struct scatterlist sg;
Eric Dumazet19689e32016-06-27 18:51:53 +0200669 struct tcphdr *_th;
YOSHIFUJI Hideaki8d26d762008-04-17 13:19:16 +0900670
Eric Dumazet19689e32016-06-27 18:51:53 +0200671 bp = hp->scratch;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800672 /* 1. TCP pseudo-header (RFC2460) */
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000673 bp->saddr = *saddr;
674 bp->daddr = *daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700675 bp->protocol = cpu_to_be32(IPPROTO_TCP);
Adam Langley00b13042008-07-31 21:36:07 -0700676 bp->len = cpu_to_be32(nbytes);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800677
	/* 2. TCP header with checksum zeroed, copied into the scratch
	 * buffer right after the pseudo-header so both are hashed in
	 * one contiguous scatterlist entry.
	 */
Eric Dumazet19689e32016-06-27 18:51:53 +0200678 _th = (struct tcphdr *)(bp + 1);
679 memcpy(_th, th, sizeof(*th));
680 _th->check = 0;
681
682 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
683 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
684 sizeof(*bp) + sizeof(*th));
Herbert Xucf80e0e2016-01-24 21:20:23 +0800685 return crypto_ahash_update(hp->md5_req);
Adam Langley49a72df2008-07-19 00:01:42 -0700686}
David S. Millerc7da57a2007-10-26 00:41:21 -0700687
/* Compute the 16-byte TCP-MD5 digest over the pseudo-header, the TCP
 * header (including options, th->doff << 2 bytes of length accounted in
 * the pseudo-header) and the key — no payload is hashed here.  Used when
 * building reply segments (see tcp_v6_send_response()).
 * Writes the digest to @md5_hash; returns 0 on success, 1 on any crypto
 * failure (in which case @md5_hash is zeroed).
 */
Eric Dumazet19689e32016-06-27 18:51:53 +0200688static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000689 const struct in6_addr *daddr, struct in6_addr *saddr,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400690 const struct tcphdr *th)
Adam Langley49a72df2008-07-19 00:01:42 -0700691{
692 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800693 struct ahash_request *req;
Adam Langley49a72df2008-07-19 00:01:42 -0700694
695 hp = tcp_get_md5sig_pool();
696 if (!hp)
697 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800698 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -0700699
Herbert Xucf80e0e2016-01-24 21:20:23 +0800700 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700701 goto clear_hash;
Eric Dumazet19689e32016-06-27 18:51:53 +0200702 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
Adam Langley49a72df2008-07-19 00:01:42 -0700703 goto clear_hash;
704 if (tcp_md5_hash_key(hp, key))
705 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800706 ahash_request_set_crypt(req, NULL, md5_hash, 0);
707 if (crypto_ahash_final(req))
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800708 goto clear_hash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800709
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800710 tcp_put_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800711 return 0;
Adam Langley49a72df2008-07-19 00:01:42 -0700712
	/* Error paths: release the per-CPU pool (if taken) and return a
	 * zeroed digest together with a nonzero result.
	 */
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800713clear_hash:
714 tcp_put_md5sig_pool();
715clear_hash_noput:
716 memset(md5_hash, 0, 16);
Adam Langley49a72df2008-07-19 00:01:42 -0700717 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800718}
719
/* Compute the 16-byte TCP-MD5 digest for a whole segment @skb:
 * pseudo-header, TCP header, payload, then the key.  The addresses come
 * from @sk when it is given (valid for established/request sockets), else
 * from the skb's own IPv6 header (e.g. when validating inbound packets).
 * Writes the digest to @md5_hash; returns 0 on success, 1 on any crypto
 * failure (digest zeroed).
 */
Eric Dumazet39f8e582015-03-24 15:58:55 -0700720static int tcp_v6_md5_hash_skb(char *md5_hash,
721 const struct tcp_md5sig_key *key,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400722 const struct sock *sk,
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400723 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800724{
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000725 const struct in6_addr *saddr, *daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700726 struct tcp_md5sig_pool *hp;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800727 struct ahash_request *req;
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400728 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800729
Eric Dumazet39f8e582015-03-24 15:58:55 -0700730 if (sk) { /* valid for establish/request sockets */
731 saddr = &sk->sk_v6_rcv_saddr;
Eric Dumazetefe42082013-10-03 15:42:29 -0700732 daddr = &sk->sk_v6_daddr;
Adam Langley49a72df2008-07-19 00:01:42 -0700733 } else {
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000734 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
Adam Langley49a72df2008-07-19 00:01:42 -0700735 saddr = &ip6h->saddr;
736 daddr = &ip6h->daddr;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800737 }
Adam Langley49a72df2008-07-19 00:01:42 -0700738
739 hp = tcp_get_md5sig_pool();
740 if (!hp)
741 goto clear_hash_noput;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800742 req = hp->md5_req;
Adam Langley49a72df2008-07-19 00:01:42 -0700743
Herbert Xucf80e0e2016-01-24 21:20:23 +0800744 if (crypto_ahash_init(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700745 goto clear_hash;
746
	/* Hash headers, then the payload that starts after the TCP
	 * options (th->doff << 2 bytes into the segment), then the key.
	 */
Eric Dumazet19689e32016-06-27 18:51:53 +0200747 if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
Adam Langley49a72df2008-07-19 00:01:42 -0700748 goto clear_hash;
749 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
750 goto clear_hash;
751 if (tcp_md5_hash_key(hp, key))
752 goto clear_hash;
Herbert Xucf80e0e2016-01-24 21:20:23 +0800753 ahash_request_set_crypt(req, NULL, md5_hash, 0);
754 if (crypto_ahash_final(req))
Adam Langley49a72df2008-07-19 00:01:42 -0700755 goto clear_hash;
756
757 tcp_put_md5sig_pool();
758 return 0;
759
760clear_hash:
761 tcp_put_md5sig_pool();
762clear_hash_noput:
763 memset(md5_hash, 0, 16);
764 return 1;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800765}
766
Eric Dumazetba8e2752015-10-02 11:43:28 -0700767#endif
768
769static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
David Ahernd14c77e2019-12-30 14:14:26 -0800770 const struct sk_buff *skb,
771 int dif, int sdif)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800772{
Eric Dumazetba8e2752015-10-02 11:43:28 -0700773#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400774 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800775 struct tcp_md5sig_key *hash_expected;
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000776 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400777 const struct tcphdr *th = tcp_hdr(skb);
David Aherndea53bb2019-12-30 14:14:28 -0800778 int genhash, l3index;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800779 u8 newhash[16];
780
David Aherndea53bb2019-12-30 14:14:28 -0800781 /* sdif set, means packet ingressed via a device
782 * in an L3 domain and dif is set to the l3mdev
783 */
784 l3index = sdif ? dif : 0;
785
786 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr, l3index);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +0900787 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800788
David S. Miller785957d2008-07-30 03:03:15 -0700789 /* We've parsed the options - do we have a hash? */
790 if (!hash_expected && !hash_location)
Eric Dumazetff74e232015-03-24 15:58:54 -0700791 return false;
David S. Miller785957d2008-07-30 03:03:15 -0700792
793 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -0700794 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazetff74e232015-03-24 15:58:54 -0700795 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800796 }
797
David S. Miller785957d2008-07-30 03:03:15 -0700798 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -0700799 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazetff74e232015-03-24 15:58:54 -0700800 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800801 }
802
803 /* check the signature */
Adam Langley49a72df2008-07-19 00:01:42 -0700804 genhash = tcp_v6_md5_hash_skb(newhash,
805 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -0700806 NULL, skb);
Adam Langley49a72df2008-07-19 00:01:42 -0700807
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800808 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -0700809 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
David Aherndea53bb2019-12-30 14:14:28 -0800810 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u L3 index %d\n",
Joe Perchese87cc472012-05-13 21:56:26 +0000811 genhash ? "failed" : "mismatch",
812 &ip6h->saddr, ntohs(th->source),
David Aherndea53bb2019-12-30 14:14:28 -0800813 &ip6h->daddr, ntohs(th->dest), l3index);
Eric Dumazetff74e232015-03-24 15:58:54 -0700814 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800815 }
Eric Dumazetba8e2752015-10-02 11:43:28 -0700816#endif
Eric Dumazetff74e232015-03-24 15:58:54 -0700817 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800818}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800819
/* Initialize the IPv6-specific fields of @req from the incoming SYN @skb:
 * remote/local addresses, the inbound interface (needed to disambiguate
 * link-local peers), and a reference to the skb itself when the listener
 * asked for IPv6 receive options/ancillary data, so they can be replayed
 * to the child socket later.
 */
Eric Dumazetb40cf182015-09-25 07:39:08 -0700820static void tcp_v6_init_req(struct request_sock *req,
821 const struct sock *sk_listener,
Octavian Purdila16bea702014-06-25 17:09:53 +0300822 struct sk_buff *skb)
823{
David Ahernc2027d12018-12-12 15:27:38 -0800824 bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
Octavian Purdila16bea702014-06-25 17:09:53 +0300825 struct inet_request_sock *ireq = inet_rsk(req);
Eric Dumazet93a77c12019-03-19 07:01:08 -0700826 const struct ipv6_pinfo *np = tcp_inet6_sk(sk_listener);
Octavian Purdila16bea702014-06-25 17:09:53 +0300827
828 ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
829 ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
830
Octavian Purdila16bea702014-06-25 17:09:53 +0300831 /* So that link locals have meaning */
David Ahernc2027d12018-12-12 15:27:38 -0800832 if ((!sk_listener->sk_bound_dev_if || l3_slave) &&
Octavian Purdila16bea702014-06-25 17:09:53 +0300833 ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
Eric Dumazet870c3152014-10-17 09:17:20 -0700834 ireq->ir_iif = tcp_v6_iif(skb);
Octavian Purdila16bea702014-06-25 17:09:53 +0300835
	/* Keep the SYN skb around (extra reference) if any rx option the
	 * listener enabled will need its headers when the child is created.
	 */
Eric Dumazet04317da2014-09-05 15:33:32 -0700836 if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
Eric Dumazetb40cf182015-09-25 07:39:08 -0700837 (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
Eric Dumazeta2247722014-09-27 09:50:56 -0700838 np->rxopt.bits.rxinfo ||
Octavian Purdila16bea702014-06-25 17:09:53 +0300839 np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
840 np->rxopt.bits.rxohlim || np->repflow)) {
Reshetova, Elena63354792017-06-30 13:07:58 +0300841 refcount_inc(&skb->users);
Octavian Purdila16bea702014-06-25 17:09:53 +0300842 ireq->pktopts = skb;
843 }
844}
845
/* route_req hook of tcp_request_sock_ipv6_ops: initialize the request
 * sock from the SYN, run the LSM connection-request hook, and resolve a
 * route (dst) for the reply.  Returns NULL when the security hook denies
 * the request or routing fails.
 */
Eric Dumazetf9646292015-09-29 07:42:50 -0700846static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
Florian Westphal7ea851d2020-11-30 16:36:30 +0100847 struct sk_buff *skb,
Eric Dumazetf9646292015-09-29 07:42:50 -0700848 struct flowi *fl,
Florian Westphal7ea851d2020-11-30 16:36:30 +0100849 struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +0300850{
Florian Westphal7ea851d2020-11-30 16:36:30 +0100851 tcp_v6_init_req(req, sk, skb);
852
853 if (security_inet_conn_request(sk, skb, req))
854 return NULL;
855
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700856 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
Octavian Purdilad94e0412014-06-25 17:09:55 +0300857}
858
/* Generic request_sock operations for IPv6 TCP: object size plus the
 * SYN-ACK retransmit, ACK/RST reply, destructor and SYN-ACK timeout
 * callbacks used while a connection request is pending.
 */
Glenn Griffinc6aefaf2008-02-07 21:49:26 -0800859struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 .family = AF_INET6,
Arnaldo Carvalho de Melo2e6599c2005-06-18 22:46:52 -0700861 .obj_size = sizeof(struct tcp6_request_sock),
Octavian Purdila5db92c92014-06-25 17:09:59 +0300862 .rtx_syn_ack = tcp_rtx_synack,
Arnaldo Carvalho de Melo60236fd2005-06-18 22:47:21 -0700863 .send_ack = tcp_v6_reqsk_send_ack,
864 .destructor = tcp_v6_reqsk_destructor,
Octavian Purdila72659ec2010-01-17 19:09:39 -0800865 .send_reset = tcp_v6_send_reset,
Wang Yufen4aa956d2014-03-29 09:27:29 +0800866 .syn_ack_timeout = tcp_syn_ack_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867};
868
/* IPv6-specific TCP request_sock callbacks: MSS clamping for the v6
 * header overhead, MD5 key lookup/signing (when configured), SYN-cookie
 * sequence generation (when configured), routing, ISN/timestamp-offset
 * generation and SYN-ACK transmission.
 */
Mat Martineau35b2c322020-01-09 07:59:21 -0800869const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
Octavian Purdila2aec4a22014-06-25 17:10:00 +0300870 .mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) -
871 sizeof(struct ipv6hdr),
Octavian Purdila16bea702014-06-25 17:09:53 +0300872#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetfd3a1542015-03-24 15:58:56 -0700873 .req_md5_lookup = tcp_v6_md5_lookup,
John Dykstrae3afe7b2009-07-16 05:04:51 +0000874 .calc_md5_hash = tcp_v6_md5_hash_skb,
Andrew Mortonb6332e62006-11-30 19:16:28 -0800875#endif
Octavian Purdilafb7b37a2014-06-25 17:09:54 +0300876#ifdef CONFIG_SYN_COOKIES
877 .cookie_init_seq = cookie_v6_init_sequence,
878#endif
Octavian Purdilad94e0412014-06-25 17:09:55 +0300879 .route_req = tcp_v6_route_req,
Eric Dumazet84b114b2017-05-05 06:56:54 -0700880 .init_seq = tcp_v6_init_seq,
881 .init_ts_off = tcp_v6_init_ts_off,
Octavian Purdilad6274bd2014-06-25 17:09:58 +0300882 .send_synack = tcp_v6_send_synack,
Octavian Purdila16bea702014-06-25 17:09:53 +0300883};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800884
/* Build and transmit a bare TCP control segment (RST when @rst, otherwise
 * a pure ACK) in reply to @skb, using the per-netns control socket rather
 * than a full flow socket.  Optional pieces: a timestamp option (when
 * @tsecr), an MD5 signature (when @key), an MPTCP reset option (RST
 * without MD5, CONFIG_MPTCP), plus traffic class, flow label and skb
 * priority for the reply.  @sk may be NULL, a full socket, a timewait or
 * a request socket; it is used for netns/mark/tstamp selection only.
 */
Eric Dumazeta00e7442015-09-29 07:42:39 -0700885static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800886 u32 ack, u32 win, u32 tsval, u32 tsecr,
887 int oif, struct tcp_md5sig_key *key, int rst,
Eric Dumazete9a5dce2019-09-24 08:01:15 -0700888 u8 tclass, __be32 label, u32 priority)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889{
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400890 const struct tcphdr *th = tcp_hdr(skb);
891 struct tcphdr *t1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700892 struct sk_buff *buff;
David S. Miller4c9483b2011-03-12 16:22:43 -0500893 struct flowi6 fl6;
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800894 struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
Daniel Lezcanoe5047992008-03-07 11:16:26 -0800895 struct sock *ctl_sk = net->ipv6.tcp_sk;
YOSHIFUJI Hideaki9cb57342008-01-12 02:16:03 -0800896 unsigned int tot_len = sizeof(struct tcphdr);
Florian Westphaldc87efd2021-04-01 16:19:44 -0700897 __be32 mrst = 0, *topt;
Eric Dumazetadf30902009-06-02 05:19:30 +0000898 struct dst_entry *dst;
Jon Maxwell00483692018-05-10 16:53:51 +1000899 __u32 mark = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900
	/* Account option space: timestamps, MD5, MPTCP reset option. */
Andrey Vaginee684b62013-02-11 05:50:19 +0000901 if (tsecr)
YOSHIFUJI Hideaki4244f8a2006-10-10 19:40:50 -0700902 tot_len += TCPOLEN_TSTAMP_ALIGNED;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800903#ifdef CONFIG_TCP_MD5SIG
904 if (key)
905 tot_len += TCPOLEN_MD5SIG_ALIGNED;
906#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907
Florian Westphaldc87efd2021-04-01 16:19:44 -0700908#ifdef CONFIG_MPTCP
909 if (rst && !key) {
910 mrst = mptcp_reset_option(skb);
911
912 if (mrst)
913 tot_len += sizeof(__be32);
914 }
915#endif
916
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
918 GFP_ATOMIC);
Ian Morris63159f22015-03-29 14:00:04 +0100919 if (!buff)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920 return;
921
922 skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);
923
Johannes Bergd58ff352017-06-16 14:29:23 +0200924 t1 = skb_push(buff, tot_len);
Herbert Xu6651ffc2010-04-21 00:47:15 -0700925 skb_reset_transport_header(buff);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926
927 /* Swap the send and the receive. */
928 memset(t1, 0, sizeof(*t1));
929 t1->dest = th->source;
930 t1->source = th->dest;
Ilpo Järvinen77c676d2008-10-09 14:41:38 -0700931 t1->doff = tot_len / 4;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 t1->seq = htonl(seq);
933 t1->ack_seq = htonl(ack);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700934 t1->ack = !rst || !th->ack;
935 t1->rst = rst;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936 t1->window = htons(win);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800937
	/* Write TCP options directly after the fixed header. */
Al Viroe69a4ad2006-11-14 20:56:00 -0800938 topt = (__be32 *)(t1 + 1);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900939
Andrey Vaginee684b62013-02-11 05:50:19 +0000940 if (tsecr) {
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800941 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
942 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
Andrey Vaginee684b62013-02-11 05:50:19 +0000943 *topt++ = htonl(tsval);
944 *topt++ = htonl(tsecr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700945 }
946
Florian Westphaldc87efd2021-04-01 16:19:44 -0700947 if (mrst)
948 *topt++ = mrst;
949
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800950#ifdef CONFIG_TCP_MD5SIG
951 if (key) {
952 *topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
953 (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
Adam Langley49a72df2008-07-19 00:01:42 -0700954 tcp_v6_md5_hash_hdr((__u8 *)topt, key,
Adam Langley90b7e112008-07-31 20:49:48 -0700955 &ipv6_hdr(skb)->saddr,
956 &ipv6_hdr(skb)->daddr, t1);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800957 }
958#endif
959
	/* Reply flow: swap the addresses found in the offending skb. */
David S. Miller4c9483b2011-03-12 16:22:43 -0500960 memset(&fl6, 0, sizeof(fl6));
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +0000961 fl6.daddr = ipv6_hdr(skb)->saddr;
962 fl6.saddr = ipv6_hdr(skb)->daddr;
Florent Fourcot1d13a962014-01-16 17:21:22 +0100963 fl6.flowlabel = label;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964
David S. Millere5700af2010-04-21 14:59:20 -0700965 buff->ip_summed = CHECKSUM_PARTIAL;
966 buff->csum = 0;
967
David S. Miller4c9483b2011-03-12 16:22:43 -0500968 __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969
David S. Miller4c9483b2011-03-12 16:22:43 -0500970 fl6.flowi6_proto = IPPROTO_TCP;
Lorenzo Colittia36dbdb2014-04-11 13:19:12 +0900971 if (rt6_need_strict(&fl6.daddr) && !oif)
Eric Dumazet870c3152014-10-17 09:17:20 -0700972 fl6.flowi6_oif = tcp_v6_iif(skb);
David Ahern9b6c14d2016-11-09 09:07:26 -0800973 else {
974 if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
975 oif = skb->skb_iif;
976
977 fl6.flowi6_oif = oif;
978 }
David Ahern1d2f7b22016-05-04 21:26:08 -0700979
Eric Dumazetc67b8552019-06-08 17:58:51 -0700980 if (sk) {
981 if (sk->sk_state == TCP_TIME_WAIT) {
982 mark = inet_twsk(sk)->tw_mark;
983 /* autoflowlabel relies on buff->hash */
984 skb_set_hash(buff, inet_twsk(sk)->tw_txhash,
985 PKT_HASH_TYPE_L4);
986 } else {
987 mark = sk->sk_mark;
988 }
Eric Dumazetd6fb3962019-06-13 21:22:35 -0700989 buff->tstamp = tcp_transmit_time(sk);
Eric Dumazetc67b8552019-06-08 17:58:51 -0700990 }
Jon Maxwell00483692018-05-10 16:53:51 +1000991 fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark) ?: mark;
David S. Miller1958b852011-03-12 16:36:19 -0500992 fl6.fl6_dport = t1->dest;
993 fl6.fl6_sport = t1->source;
Lorenzo Colittie2d118a2016-11-04 02:23:43 +0900994 fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
Paul Moore3df98d72020-09-27 22:38:26 -0400995 security_skb_classify_flow(skb, flowi6_to_flowi_common(&fl6));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700997 /* Pass a socket to ip6_dst_lookup either it is for RST
998 * Underlying function will use this to retrieve the network
999 * namespace
1000 */
Sabrina Dubrocac4e85f72019-12-04 15:35:52 +01001001 dst = ip6_dst_lookup_flow(sock_net(ctl_sk), ctl_sk, &fl6, NULL);
David S. Miller68d0c6d2011-03-01 13:19:07 -08001002 if (!IS_ERR(dst)) {
1003 skb_dst_set(buff, dst);
Wei Wange92dd772020-09-08 14:29:02 -07001004 ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL,
1005 tclass & ~INET_ECN_MASK, priority);
Eric Dumazetc10d9312016-04-29 14:16:47 -07001006 TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
David S. Miller68d0c6d2011-03-01 13:19:07 -08001007 if (rst)
Eric Dumazetc10d9312016-04-29 14:16:47 -07001008 TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
David S. Miller68d0c6d2011-03-01 13:19:07 -08001009 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010 }
1011
	/* Route lookup failed: drop the reply we built. */
1012 kfree_skb(buff);
1013}
1014
/* Send a RST in reply to @skb.  @sk may be NULL (no matching socket was
 * found).  With CONFIG_TCP_MD5SIG, when no socket is available but the
 * offending segment carries an MD5 option, a listener is looked up by
 * source port so the RST can be signed with the proper key — and no RST
 * is sent at all if the segment's own signature does not verify.
 * seq/ack are derived per RFC 793 from the incoming segment.
 */
Eric Dumazeta00e7442015-09-29 07:42:39 -07001015static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001016{
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001017 const struct tcphdr *th = tcp_hdr(skb);
Eric Dumazet323a53c2019-06-05 07:55:09 -07001018 struct ipv6hdr *ipv6h = ipv6_hdr(skb);
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001019 u32 seq = 0, ack_seq = 0;
Guo-Fu Tsengfa3e5b42008-10-09 21:11:56 -07001020 struct tcp_md5sig_key *key = NULL;
Shawn Lu658ddaa2012-01-31 22:35:48 +00001021#ifdef CONFIG_TCP_MD5SIG
1022 const __u8 *hash_location = NULL;
Shawn Lu658ddaa2012-01-31 22:35:48 +00001023 unsigned char newhash[16];
1024 int genhash;
1025 struct sock *sk1 = NULL;
1026#endif
Eric Dumazet323a53c2019-06-05 07:55:09 -07001027 __be32 label = 0;
Eric Dumazete9a5dce2019-09-24 08:01:15 -07001028 u32 priority = 0;
Eric Dumazet323a53c2019-06-05 07:55:09 -07001029 struct net *net;
Song Liuc24b14c2017-10-23 09:20:24 -07001030 int oif = 0;
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001031
	/* Never answer a RST with a RST. */
1032 if (th->rst)
1033 return;
1034
Eric Dumazetc3658e82014-11-25 07:40:04 -08001035 /* If sk not NULL, it means we did a successful lookup and incoming
1036 * route had to be correct. prequeue might have dropped our dst.
1037 */
1038 if (!sk && !ipv6_unicast_destination(skb))
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001039 return;
1040
Eric Dumazet39209672019-06-07 12:23:48 -07001041 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001042#ifdef CONFIG_TCP_MD5SIG
Eric Dumazet3b24d852016-04-01 08:52:17 -07001043 rcu_read_lock();
Shawn Lu658ddaa2012-01-31 22:35:48 +00001044 hash_location = tcp_parse_md5sig_option(th);
Florian Westphal271c3b92015-12-21 21:29:26 +01001045 if (sk && sk_fullsock(sk)) {
David Aherndea53bb2019-12-30 14:14:28 -08001046 int l3index;
1047
1048 /* sdif set, means packet ingressed via a device
1049 * in an L3 domain and inet_iif is set to it.
1050 */
1051 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1052 key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr, l3index);
Florian Westphale46787f2015-12-21 21:29:25 +01001053 } else if (hash_location) {
David Ahernd14c77e2019-12-30 14:14:26 -08001054 int dif = tcp_v6_iif_l3_slave(skb);
1055 int sdif = tcp_v6_sdif(skb);
David Aherndea53bb2019-12-30 14:14:28 -08001056 int l3index;
David Ahernd14c77e2019-12-30 14:14:26 -08001057
Shawn Lu658ddaa2012-01-31 22:35:48 +00001058 /*
1059 * active side is lost. Try to find listening socket through
1060 * source port, and then find md5 key through listening socket.
1061 * we are not loose security here:
1062 * Incoming packet is checked with md5 hash with finding key,
1063 * no RST generated if md5 hash doesn't match.
1064 */
Eric Dumazet323a53c2019-06-05 07:55:09 -07001065 sk1 = inet6_lookup_listener(net,
Craig Galleka5836362016-02-10 11:50:38 -05001066 &tcp_hashinfo, NULL, 0,
1067 &ipv6h->saddr,
Tom Herbert5ba24952013-01-22 09:50:39 +00001068 th->source, &ipv6h->daddr,
David Ahernd14c77e2019-12-30 14:14:26 -08001069 ntohs(th->source), dif, sdif);
Shawn Lu658ddaa2012-01-31 22:35:48 +00001070 if (!sk1)
Eric Dumazet3b24d852016-04-01 08:52:17 -07001071 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +00001072
David Aherndea53bb2019-12-30 14:14:28 -08001073 /* sdif set, means packet ingressed via a device
1074 * in an L3 domain and dif is set to it.
1075 */
1076 l3index = tcp_v6_sdif(skb) ? dif : 0;
1077
1078 key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr, l3index);
Shawn Lu658ddaa2012-01-31 22:35:48 +00001079 if (!key)
Eric Dumazet3b24d852016-04-01 08:52:17 -07001080 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +00001081
Eric Dumazet39f8e582015-03-24 15:58:55 -07001082 genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
Shawn Lu658ddaa2012-01-31 22:35:48 +00001083 if (genhash || memcmp(hash_location, newhash, 16) != 0)
Eric Dumazet3b24d852016-04-01 08:52:17 -07001084 goto out;
Shawn Lu658ddaa2012-01-31 22:35:48 +00001085 }
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001086#endif
1087
	/* RFC 793: echo the peer's ACK as our SEQ, or compute the ACK
	 * covering the offending segment when it carried no ACK.
	 */
1088 if (th->ack)
1089 seq = ntohl(th->ack_seq);
1090 else
1091 ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
1092 (th->doff << 2);
1093
Song Liuc24b14c2017-10-23 09:20:24 -07001094 if (sk) {
1095 oif = sk->sk_bound_dev_if;
Eric Dumazet052e0692019-07-10 06:40:09 -07001096 if (sk_fullsock(sk)) {
1097 const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
1098
Song Liu5c487bb2018-02-06 20:50:23 -08001099 trace_tcp_send_reset(sk, skb);
Eric Dumazet052e0692019-07-10 06:40:09 -07001100 if (np->repflow)
1101 label = ip6_flowlabel(ipv6h);
Eric Dumazete9a5dce2019-09-24 08:01:15 -07001102 priority = sk->sk_priority;
Eric Dumazet052e0692019-07-10 06:40:09 -07001103 }
Eric Dumazetf6c0f5d2019-09-24 08:01:16 -07001104 if (sk->sk_state == TCP_TIME_WAIT) {
Eric Dumazet50a8acc2019-06-05 07:55:10 -07001105 label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
Eric Dumazetf6c0f5d2019-09-24 08:01:16 -07001106 priority = inet_twsk(sk)->tw_priority;
1107 }
Eric Dumazet323a53c2019-06-05 07:55:09 -07001108 } else {
Eric Dumazeta346abe2019-07-01 06:39:36 -07001109 if (net->ipv6.sysctl.flowlabel_reflect & FLOWLABEL_REFLECT_TCP_RESET)
Eric Dumazet323a53c2019-06-05 07:55:09 -07001110 label = ip6_flowlabel(ipv6h);
Song Liuc24b14c2017-10-23 09:20:24 -07001111 }
1112
Wei Wange92dd772020-09-08 14:29:02 -07001113 tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1,
1114 ipv6_get_dsfield(ipv6h), label, priority);
Shawn Lu658ddaa2012-01-31 22:35:48 +00001115
1116#ifdef CONFIG_TCP_MD5SIG
Eric Dumazet3b24d852016-04-01 08:52:17 -07001117out:
1118 rcu_read_unlock();
Shawn Lu658ddaa2012-01-31 22:35:48 +00001119#endif
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001120}
1121
/* Thin wrapper around tcp_v6_send_response() that sends a pure ACK
 * (rst == 0) with the given sequence/window/timestamp values.
 */
Eric Dumazeta00e7442015-09-29 07:42:39 -07001122static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -08001123 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
Florent Fourcot1d13a962014-01-16 17:21:22 +01001124 struct tcp_md5sig_key *key, u8 tclass,
Eric Dumazete9a5dce2019-09-24 08:01:15 -07001125 __be32 label, u32 priority)
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001126{
Eric Dumazet0f85fea2014-12-09 09:56:08 -08001127 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
Eric Dumazete9a5dce2019-09-24 08:01:15 -07001128 tclass, label, priority);
Ilpo Järvinen626e2642008-10-09 14:42:40 -07001129}
1130
/* ACK a segment received for a TIME_WAIT socket, using the state stored
 * in the timewait sock (snd_nxt/rcv_nxt, scaled window, timestamps, MD5
 * key, tclass, flow label and priority), then drop the timewait ref.
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
1132{
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001133 struct inet_timewait_sock *tw = inet_twsk(sk);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001134 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001135
Eric Dumazet0f85fea2014-12-09 09:56:08 -08001136 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001137 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
Eric Dumazet9a568de2017-05-16 14:00:14 -07001138 tcp_time_stamp_raw() + tcptw->tw_ts_offset,
Wang Yufen9c76a112014-03-29 09:27:31 +08001139 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
Eric Dumazetf6c0f5d2019-09-24 08:01:16 -07001140 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel), tw->tw_priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001141
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -07001142 inet_twsk_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143}
1144
/* ACK on behalf of a pending connection request (@req) on listener @sk,
 * signing with the listener's MD5 key for the peer (scoped to the skb's
 * L3 domain) when one is configured.
 */
Eric Dumazeta00e7442015-09-29 07:42:39 -07001145static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -07001146 struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147{
David Aherndea53bb2019-12-30 14:14:28 -08001148 int l3index;
1149
1150 l3index = tcp_v6_sdif(skb) ? tcp_v6_iif_l3_slave(skb) : 0;
1151
Daniel Lee3a19ce02014-05-11 20:22:13 -07001152 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
1153 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
1154 */
Eric Dumazet20a2b492016-08-22 11:31:10 -07001155 /* RFC 7323 2.3
1156 * The window field (SEG.WND) of every outgoing segment, with the
1157 * exception of <SYN> segments, MUST be right-shifted by
1158 * Rcv.Wind.Shift bits:
1159 */
Eric Dumazet0f85fea2014-12-09 09:56:08 -08001160 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
Daniel Lee3a19ce02014-05-11 20:22:13 -07001161 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
Eric Dumazet20a2b492016-08-22 11:31:10 -07001162 tcp_rsk(req)->rcv_nxt,
1163 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
Eric Dumazet9a568de2017-05-16 14:00:14 -07001164 tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
Florian Westphal95a22ca2016-12-01 11:32:06 +01001165 req->ts_recent, sk->sk_bound_dev_if,
David Aherndea53bb2019-12-30 14:14:28 -08001166 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr, l3index),
Wei Wange92dd772020-09-08 14:29:02 -07001167 ipv6_get_dsfield(ipv6_hdr(skb)), 0, sk->sk_priority);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001168}
1169
1170
/* With CONFIG_SYN_COOKIES, validate a SYN-cookie ACK (non-SYN segment)
 * and return the socket created from it; otherwise return @sk unchanged.
 */
Eric Dumazet079096f2015-10-02 11:43:32 -07001171static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172{
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001173#ifdef CONFIG_SYN_COOKIES
Eric Dumazet079096f2015-10-02 11:43:32 -07001174 const struct tcphdr *th = tcp_hdr(skb);
1175
Florian Westphalaf9b4732010-06-03 00:43:44 +00001176 if (!th->syn)
Glenn Griffinc6aefaf2008-02-07 21:49:26 -08001177 sk = cookie_v6_check(sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178#endif
1179 return sk;
1180}
1181
/* Generate a SYN-cookie ISN for a v6 SYN without creating any state:
 * returns the negotiated MSS (0 when cookies are disabled/unavailable)
 * and stores the cookie sequence number in *@cookie.  Also records the
 * overflow event on the listener's SYN queue.
 */
Petar Penkov9349d602019-07-29 09:59:14 -07001182u16 tcp_v6_get_syncookie(struct sock *sk, struct ipv6hdr *iph,
1183 struct tcphdr *th, u32 *cookie)
1184{
1185 u16 mss = 0;
1186#ifdef CONFIG_SYN_COOKIES
1187 mss = tcp_get_syncookie_mss(&tcp6_request_sock_ops,
1188 &tcp_request_sock_ipv6_ops, sk, th);
1189 if (mss) {
1190 *cookie = __cookie_v6_init_sequence(iph, th, &mss);
1191 tcp_synq_overflow(sk);
1192 }
1193#endif
1194 return mss;
1195}
1196
/* Handle an incoming connection request (SYN) on an IPv6 listener.
 * IPv4 packets on a dual-stack socket are delegated to
 * tcp_v4_conn_request().  Non-unicast destinations are dropped, and SYNs
 * whose source is a v4-mapped address are counted as header errors and
 * ignored.  Always returns 0 (no RST is sent from here).
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
1198{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 if (skb->protocol == htons(ETH_P_IP))
1200 return tcp_v4_conn_request(sk, skb);
1201
1202 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09001203 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204
Jakub Kicinskidcc32f4f2021-03-17 09:55:15 -07001205 if (ipv6_addr_v4mapped(&ipv6_hdr(skb)->saddr)) {
1206 __IP6_INC_STATS(sock_net(sk), NULL, IPSTATS_MIB_INHDRERRORS);
1207 return 0;
1208 }
1209
Octavian Purdila1fb6f152014-06-25 17:10:02 +03001210 return tcp_conn_request(&tcp6_request_sock_ops,
1211 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212
Linus Torvalds1da177e2005-04-16 15:20:36 -07001213drop:
Eric Dumazet9caad862016-04-01 08:52:20 -07001214 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215 return 0; /* don't send reset */
1216}
1217
/* Undo tcp_v6_fill_cb(): copy the inet6_skb_parm saved in the TCP control
 * block back into IP6CB(skb) so IPv6-layer code can read it again.
 */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1227
/* Create the child (ESTABLISHED) socket once the 3WHS for @req completes.
 *
 * @sk:		the listening socket
 * @skb:	the final ACK (or the SYN in the Fast Open / cookie paths)
 * @req:	the request_sock being promoted
 * @dst:	optional pre-computed route; looked up here when NULL
 * @req_unhash:	request to remove from the ehash on success
 * @own_req:	set true when this CPU won the race to insert the child
 *
 * Returns the new socket, or NULL on failure (the listener drop counter
 * is bumped on the error paths).  The v4-mapped branch delegates socket
 * creation to tcp_v4_syn_recv_sock() and then fixes up the IPv6 state.
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct inet_sock *newinet;
	bool found_dup_sk = false;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
	int l3index;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v6 mapped: the connection is really IPv4, so build the
		 * child with the IPv4 code and then graft IPv6 bookkeeping
		 * onto it below.
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

		newinet = inet_sk(newsk);
		newnp = tcp_inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* Route all subsequent operations through the mapped ops. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		if (sk_is_mptcp(newsk))
			mptcpv6_handle_mapped(newsk, true);
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* The memcpy above copied the listener's pointers; clear
		 * everything the child must not share with its parent.
		 */
		newnp->ipv6_mc_list = NULL;
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = inet_iif(skb);
		newnp->mcast_hops = ip_hdr(skb)->ttl;
		newnp->rcv_flowinfo = 0;
		if (np->repflow)
			newnp->flow_label = 0;

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	inet_sk(newsk)->pinet6 = tcp_inet6_sk(newsk);

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = tcp_inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	/* Addresses and binding come from the request, not the listener. */
	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_mc_list = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Set ToS of the new socket based upon the value of incoming SYN.
	 * ECT bits are set later in tcp_init_transfer().
	 */
	if (sock_net(sk)->ipv4.sysctl_tcp_reflect_tos)
		newnp->tclass = tcp_rsk(req)->syn_tos & ~INET_ECN_MASK;

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	/* Dummy IPv4 addresses: this socket is IPv6-only from here on. */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	l3index = l3mdev_master_ifindex_by_index(sock_net(sk), ireq->ir_iif);

	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr, l3index);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, 128, l3index, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash),
				       &found_dup_sk);
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	} else {
		if (!req_unhash && found_dup_sk) {
			/* This code path should only be executed in the
			 * syncookie case only
			 */
			bh_unlock_sock(newsk);
			sock_put(newsk);
			newsk = NULL;
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1454
INDIRECT_CALLABLE_DECLARE(struct dst_entry *ipv4_dst_check(struct dst_entry *,
							    u32));
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 *
 * Returns 0 when the skb was consumed (processed or freed); a non-zero
 * propagated by tcp_v6_rcv() as -1.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = tcp_inet6_sk(sk);
	struct sk_buff *opt_skb = NULL;
	struct tcp_sock *tp;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	/*
	 * socket locking is here for SMP purposes as backlog rcv
	 * is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		/* Drop the cached RX route if the input interface changed
		 * or the route is no longer valid for the saved cookie.
		 */
		if (dst) {
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    INDIRECT_CALL_1(dst->ops->check, ip6_dst_check,
					    dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		/* nsk != sk: a child socket was created from a SYN cookie. */
		if (nsk != sk) {
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	trace_tcp_bad_csum(skb);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			/* Latch the new options; xchg returns the old clone
			 * (possibly NULL) which is freed below.
			 */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	kfree_skb(opt_skb);
	return 0;
}
1594
/* Populate TCP_SKB_CB() from the TCP header, preserving the IPv6 control
 * block inside it first so tcp_v6_restore_cb() can put it back later.
 */
static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
			   const struct tcphdr *th)
{
	/* This is tricky: we move IP6CB at its correct location into
	 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
	 * _decode_session6() uses IP6CB().
	 * barrier() makes sure compiler won't play aliasing games.
	 */
	memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
		sizeof(struct inet6_skb_parm));
	barrier();

	TCP_SKB_CB(skb)->seq = ntohl(th->seq);
	/* end_seq counts SYN and FIN as one sequence unit each. */
	TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
				    skb->len - th->doff*4);
	TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
	TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
	TCP_SKB_CB(skb)->tcp_tw_isn = 0;
	TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
	TCP_SKB_CB(skb)->sacked = 0;
	TCP_SKB_CB(skb)->has_rxtstamp =
			skb->tstamp || skb_hwtstamps(skb)->hwtstamp;
}
1618
/* Main IPv6 TCP receive entry point (called from the IPv6 protocol layer).
 *
 * Looks up the owning socket for @skb and dispatches according to its
 * state: TIME_WAIT and NEW_SYN_RECV are handled via dedicated paths,
 * everything else goes through tcp_v6_do_rcv() either directly (socket
 * unlocked) or via the backlog.  Returns 0 when the skb was consumed,
 * -1 when tcp_v6_do_rcv() reported an error.
 */
INDIRECT_CALLABLE_SCOPE int tcp_v6_rcv(struct sk_buff *skb)
{
	struct sk_buff *skb_to_free;
	int sdif = inet6_sdif(skb);
	int dif = inet6_iif(skb);
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 * Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* pskb_may_pull() may have reallocated the header; re-read pointers. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb), sdif,
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		bool req_stolen = false;
		struct sock *nsk;

		sk = req->rsk_listener;
		if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		if (tcp_checksum_complete(skb)) {
			reqsk_put(req);
			goto csum_error;
		}
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			/* Listener went away; try to migrate the request to
			 * another socket in the same reuseport group.
			 */
			nsk = reuseport_migrate_sock(sk, req_to_sk(req), skb);
			if (!nsk) {
				inet_csk_reqsk_queue_drop_and_put(sk, req);
				goto lookup;
			}
			sk = nsk;
			/* reuseport_migrate_sock() has already held one sk_refcnt
			 * before returning.
			 */
		} else {
			sock_hold(sk);
		}
		refcounted = true;
		nsk = NULL;
		if (!tcp_filter(sk, skb)) {
			/* tcp_filter() may have trimmed the skb; re-read
			 * header pointers before filling the control block.
			 */
			th = (const struct tcphdr *)skb->data;
			hdr = ipv6_hdr(skb);
			tcp_v6_fill_cb(skb, hdr, th);
			nsk = tcp_check_req(sk, skb, req, false, &req_stolen);
		}
		if (!nsk) {
			reqsk_put(req);
			if (req_stolen) {
				/* Another cpu got exclusive access to req
				 * and created a full blown socket.
				 * Try to feed this packet to this socket
				 * instead of discarding it.
				 */
				tcp_v6_restore_cb(skb);
				sock_put(sk);
				goto lookup;
			}
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/* Enforce IP_MINTTL-style protection (drop low hop-limit packets). */
	if (hdr->hop_limit < tcp_inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	if (tcp_v6_inbound_md5_hash(sk, skb, dif, sdif))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);
	tcp_v6_fill_cb(skb, hdr, th);

	skb->dev = NULL;

	/* Listeners are processed without taking the socket lock. */
	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		skb_to_free = sk->sk_rx_skb_cache;
		sk->sk_rx_skb_cache = NULL;
		ret = tcp_v6_do_rcv(sk, skb);
	} else {
		/* Owned by user context: queue to the backlog instead. */
		if (tcp_add_backlog(sk, skb))
			goto discard_and_relse;
		skb_to_free = NULL;
	}
	bh_unlock_sock(sk);
	/* Free the cached skb only after releasing the socket lock. */
	if (skb_to_free)
		__kfree_skb(skb_to_free);
put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		trace_tcp_bad_csum(skb);
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* A new SYN may legitimately reuse a TIME_WAIT tuple:
		 * find a current listener and restart processing with it.
		 */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest),
					    tcp_v6_iif_l3_slave(skb),
					    sdif);
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
	}
		/* to ACK */
		fallthrough;
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1847
/* Early demux: before the normal receive path runs, try to find an
 * established socket for this TCP/IPv6 packet and attach it (and its
 * cached route, when still valid) to the skb.  Runs lockless under RCU.
 */
INDIRECT_CALLABLE_SCOPE void tcp_v6_early_demux(struct sk_buff *skb)
{
	const struct ipv6hdr *hdr;
	const struct tcphdr *th;
	struct sock *sk;

	/* Only packets addressed to this host are candidates. */
	if (skb->pkt_type != PACKET_HOST)
		return;

	/* Need at least a complete base TCP header in linear data. */
	if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
		return;

	hdr = ipv6_hdr(skb);
	th = tcp_hdr(skb);

	/* Malformed: data offset smaller than the minimum TCP header. */
	if (th->doff < sizeof(struct tcphdr) / 4)
		return;

	/* Note : We use inet6_iif() here, not tcp_v6_iif() */
	sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
					&hdr->saddr, th->source,
					&hdr->daddr, ntohs(th->dest),
					inet6_iif(skb), inet6_sdif(skb));
	if (sk) {
		skb->sk = sk;
		skb->destructor = sock_edemux;
		if (sk_fullsock(sk)) {
			/* Reuse the socket's cached dst only if it is still
			 * valid and matches the incoming interface.
			 */
			struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);

			if (dst)
				dst = dst_check(dst, tcp_inet6_sk(sk)->rx_dst_cookie);
			if (dst &&
			    inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
				skb_dst_set_noref(skb, dst);
		}
	}
}
1885
/* timewait_sock glue for TCP over IPv6. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1891
Eric Dumazetdd2e0b82020-06-19 12:12:35 -07001892INDIRECT_CALLABLE_SCOPE void tcp_v6_send_check(struct sock *sk, struct sk_buff *skb)
1893{
1894 struct ipv6_pinfo *np = inet6_sk(sk);
1895
1896 __tcp_v6_send_check(skb, &np->saddr, &sk->sk_v6_daddr);
1897}
1898
/* AF-specific connection ops for native TCP-over-IPv6 sockets. */
const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1914
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 signature ops for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001922
/*
 *	TCP over IPv4 via INET6 API
 */
/* AF-specific ops used when an AF_INET6 socket talks to a v4-mapped
 * peer: IPv4 transmit/header helpers, IPv6 sockopt/sockaddr handling.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1940
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 ops for v4-mapped peers: uses the IPv4 lookup/hash helpers. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949/* NOTE: A lot of things set to zero explicitly by call to
1950 * sk_alloc() so need not be done here.
1951 */
1952static int tcp_v6_init_sock(struct sock *sk)
1953{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001954 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001955
Neal Cardwell900f65d2012-04-19 09:55:21 +00001956 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001957
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001958 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001959
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001960#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001961 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001962#endif
1963
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 return 0;
1965}
1966
/* Socket destructor: common IPv4/TCP teardown, then IPv6 state. */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1972
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCPv6 sock list dumping. */

/* Emit one SYN_RECV request socket as a /proc/net/tcp6 line. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	/* Remaining lifetime of the request's retransmit timer. */
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
2006
/* Emit one full TCP socket (listener or established) as a
 * /proc/net/tcp6 line.  Fields are read locklessly, hence the
 * READ_ONCE() annotations and the clamp of a transient negative
 * receive-queue size.
 */
static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
{
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;
	int timer_active;
	unsigned long timer_expires;
	const struct inet_sock *inet = inet_sk(sp);
	const struct tcp_sock *tp = tcp_sk(sp);
	const struct inet_connection_sock *icsk = inet_csk(sp);
	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
	int rx_queue;
	int state;

	dest = &sp->sk_v6_daddr;
	src = &sp->sk_v6_rcv_saddr;
	destp = ntohs(inet->inet_dport);
	srcp = ntohs(inet->inet_sport);

	/* Classify the pending timer for the "tr" column:
	 * 1 = retransmit/loss-probe/reo, 4 = zero-window probe,
	 * 2 = sk_timer (e.g. keepalive), 0 = none.
	 */
	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
	    icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
	    icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sp->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sp->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires = jiffies;
	}

	state = inet_sk_state_load(sp);
	if (state == TCP_LISTEN)
		/* Listeners report the accept backlog instead. */
		rx_queue = READ_ONCE(sp->sk_ack_backlog);
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, READ_ONCE(tp->rcv_nxt) -
				      READ_ONCE(tp->copied_seq), 0);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   state,
		   READ_ONCE(tp->write_seq) - tp->snd_una,
		   rx_queue,
		   timer_active,
		   jiffies_delta_to_clock_t(timer_expires - jiffies),
		   icsk->icsk_retransmits,
		   from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
		   icsk->icsk_probes_out,
		   sock_i_ino(sp),
		   refcount_read(&sp->sk_refcnt), sp,
		   jiffies_to_clock_t(icsk->icsk_rto),
		   jiffies_to_clock_t(icsk->icsk_ack.ato),
		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
		   tp->snd_cwnd,
		   state == TCP_LISTEN ?
			fastopenq->max_qlen :
			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
		   );
}
2078
/* Emit one TIME_WAIT socket as a /proc/net/tcp6 line. */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	/* Remaining time until the timewait timer fires. */
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   refcount_read(&tw->tw_refcnt), tw);
}
2103
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104static int tcp6_seq_show(struct seq_file *seq, void *v)
2105{
2106 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07002107 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
2109 if (v == SEQ_START_TOKEN) {
2110 seq_puts(seq,
2111 " sl "
2112 "local_address "
2113 "remote_address "
2114 "st tx_queue rx_queue tr tm->when retrnsmt"
2115 " uid timeout inode\n");
2116 goto out;
2117 }
2118 st = seq->private;
2119
Eric Dumazet079096f2015-10-02 11:43:32 -07002120 if (sk->sk_state == TCP_TIME_WAIT)
2121 get_timewait6_sock(seq, v, st->num);
2122 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07002123 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07002124 else
2125 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126out:
2127 return 0;
2128}
2129
/* seq_file iteration ops for /proc/net/tcp6; iteration itself is the
 * shared TCP iterator, only ->show is IPv6-specific.
 */
static const struct seq_operations tcp6_seq_ops = {
	.show		= tcp6_seq_show,
	.start		= tcp_seq_start,
	.next		= tcp_seq_next,
	.stop		= tcp_seq_stop,
};
2136
/* Restricts the shared TCP /proc iterator to AF_INET6 sockets. */
static struct tcp_seq_afinfo tcp6_seq_afinfo = {
	.family		= AF_INET6,
};
2140
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00002141int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142{
Christoph Hellwigc3506372018-04-10 19:42:55 +02002143 if (!proc_create_net_data("tcp6", 0444, net->proc_net, &tcp6_seq_ops,
2144 sizeof(struct tcp_iter_state), &tcp6_seq_afinfo))
Christoph Hellwig37d849b2018-04-11 09:31:28 +02002145 return -ENOMEM;
2146 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147}
2148
/* Remove the /proc/net/tcp6 entry for @net. */
void tcp6_proc_exit(struct net *net)
{
	remove_proc_entry("tcp6", net->proc_net);
}
#endif
2154
/* struct proto for TCP over IPv6 sockets; mostly the shared TCP
 * implementation with IPv6-specific connect/init/hash hooks.
 * Exported for MPTCP (see EXPORT_SYMBOL_GPL below).
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.pre_connect		= tcp_v6_pre_connect,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.bpf_bypass_getsockopt	= tcp_bpf_bypass_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
#ifdef CONFIG_BPF_SYSCALL
	.psock_update_sk_prot	= tcp_bpf_update_proto,
#endif
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.leave_memory_pressure	= tcp_leave_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_wmem),
	.sysctl_rmem_offset	= offsetof(struct net, ipv4.sysctl_tcp_rmem),
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_TYPESAFE_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
	.diag_destroy		= tcp_abort,
};
EXPORT_SYMBOL_GPL(tcpv6_prot);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
/* inet6 layer protocol handler for IPPROTO_TCP.
 * thinking of making this const? Don't.
 * early_demux can change based on sysctl.
 */
static struct inet6_protocol tcpv6_protocol = {
	.early_demux	=	tcp_v6_early_demux,
	.early_demux_handler =  tcp_v6_early_demux,
	.handler	=	tcp_v6_rcv,
	.err_handler	=	tcp_v6_err,
	.flags		=	INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
};
2213
/* Socket-switch entry mapping SOCK_STREAM/IPPROTO_TCP on AF_INET6
 * to tcpv6_prot and the generic inet6 stream ops.
 */
static struct inet_protosw tcpv6_protosw = {
	.type		=	SOCK_STREAM,
	.protocol	=	IPPROTO_TCP,
	.prot		=	&tcpv6_prot,
	.ops		=	&inet6_stream_ops,
	.flags		=	INET_PROTOSW_PERMANENT |
				INET_PROTOSW_ICSK,
};
2222
/* Per-netns init: create the kernel control socket (net->ipv6.tcp_sk). */
static int __net_init tcpv6_net_init(struct net *net)
{
	return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
				    SOCK_RAW, IPPROTO_TCP, net);
}
2228
/* Per-netns exit: destroy the kernel control socket. */
static void __net_exit tcpv6_net_exit(struct net *net)
{
	inet_ctl_sock_destroy(net->ipv6.tcp_sk);
}
2233
/* Batched netns exit: purge all IPv6 timewait sockets in one pass. */
static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, AF_INET6);
}
2238
/* Per-network-namespace lifecycle hooks for TCPv6. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
2244
/* Register TCPv6: inet6 protocol handler, protosw entry, per-netns
 * state, and MPTCPv6.  On failure, unwinds the earlier registrations
 * in reverse order via the goto labels.
 */
int __init tcpv6_init(void)
{
	int ret;

	ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
	if (ret)
		goto out;

	/* register inet6 protocol */
	ret = inet6_register_protosw(&tcpv6_protosw);
	if (ret)
		goto out_tcpv6_protocol;

	ret = register_pernet_subsys(&tcpv6_net_ops);
	if (ret)
		goto out_tcpv6_protosw;

	ret = mptcpv6_init();
	if (ret)
		goto out_tcpv6_pernet_subsys;

out:
	return ret;

out_tcpv6_pernet_subsys:
	unregister_pernet_subsys(&tcpv6_net_ops);
out_tcpv6_protosw:
	inet6_unregister_protosw(&tcpv6_protosw);
out_tcpv6_protocol:
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
	goto out;
}
2277
/* Unregister TCPv6, in the reverse order of tcpv6_init(). */
void tcpv6_exit(void)
{
	unregister_pernet_subsys(&tcpv6_net_ops);
	inet6_unregister_protosw(&tcpv6_protosw);
	inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
}