blob: 0f08d718a00238b228d859d2c0a1dab10db57125 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * TCP over IPv6
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09003 * Linux INET6 implementation
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
5 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09006 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09008 * Based on:
Linus Torvalds1da177e2005-04-16 15:20:36 -07009 * linux/net/ipv4/tcp.c
10 * linux/net/ipv4/tcp_input.c
11 * linux/net/ipv4/tcp_output.c
12 *
13 * Fixes:
14 * Hideaki YOSHIFUJI : sin6_scope_id support
15 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
16 * Alexey Kuznetsov allow both IPv4 and IPv6 sockets to bind
17 * a single port at the same time.
18 * YOSHIFUJI Hideaki @USAGI: convert /proc/net/tcp6 to seq_file.
19 *
20 * This program is free software; you can redistribute it and/or
21 * modify it under the terms of the GNU General Public License
22 * as published by the Free Software Foundation; either version
23 * 2 of the License, or (at your option) any later version.
24 */
25
Herbert Xueb4dea52008-12-29 23:04:08 -080026#include <linux/bottom_half.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070027#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/errno.h>
29#include <linux/types.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/jiffies.h>
34#include <linux/in.h>
35#include <linux/in6.h>
36#include <linux/netdevice.h>
37#include <linux/init.h>
38#include <linux/jhash.h>
39#include <linux/ipsec.h>
40#include <linux/times.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090041#include <linux/slab.h>
Wang Yufen4aa956d2014-03-29 09:27:29 +080042#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/ipv6.h>
44#include <linux/icmpv6.h>
45#include <linux/random.h>
46
47#include <net/tcp.h>
48#include <net/ndisc.h>
Arnaldo Carvalho de Melo5324a042005-08-12 09:26:18 -030049#include <net/inet6_hashtables.h>
Arnaldo Carvalho de Melo81297652005-12-13 23:15:24 -080050#include <net/inet6_connection_sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070051#include <net/ipv6.h>
52#include <net/transp_v6.h>
53#include <net/addrconf.h>
54#include <net/ip6_route.h>
55#include <net/ip6_checksum.h>
56#include <net/inet_ecn.h>
57#include <net/protocol.h>
58#include <net/xfrm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059#include <net/snmp.h>
60#include <net/dsfield.h>
Arnaldo Carvalho de Melo6d6ee432005-12-13 23:25:19 -080061#include <net/timewait_sock.h>
Denis V. Lunev3d58b5f2008-04-03 14:22:32 -070062#include <net/inet_common.h>
David S. Miller6e5714e2011-08-03 20:50:44 -070063#include <net/secure_seq.h>
Eliezer Tamir076bb0c2013-07-10 17:13:17 +030064#include <net/busy_poll.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070065
Linus Torvalds1da177e2005-04-16 15:20:36 -070066#include <linux/proc_fs.h>
67#include <linux/seq_file.h>
68
Herbert Xucf80e0e2016-01-24 21:20:23 +080069#include <crypto/hash.h>
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -080070#include <linux/scatterlist.h>
71
Eric Dumazeta00e7442015-09-29 07:42:39 -070072static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb);
73static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -070074 struct request_sock *req);
Linus Torvalds1da177e2005-04-16 15:20:36 -070075
76static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070077
Stephen Hemminger3b401a82009-09-01 19:25:04 +000078static const struct inet_connection_sock_af_ops ipv6_mapped;
79static const struct inet_connection_sock_af_ops ipv6_specific;
David S. Millera9286302006-11-14 19:53:22 -080080#ifdef CONFIG_TCP_MD5SIG
Stephen Hemmingerb2e4b3de2009-09-01 19:25:03 +000081static const struct tcp_sock_af_ops tcp_sock_ipv6_specific;
82static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific;
YOSHIFUJI Hideaki9501f972008-04-18 12:45:16 +090083#else
/* Stub used when CONFIG_TCP_MD5SIG is disabled: no MD5 keys can exist,
 * so every lookup reports "no key configured".
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return NULL;
}
David S. Millera9286302006-11-14 19:53:22 -080089#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
/* Cache the skb's dst entry on the socket as the RX route.
 *
 * dst_hold_safe() takes a reference only if the dst is still alive; on
 * success the socket stores the dst, the incoming interface index, and
 * the route's cookie (used later by __sk_dst_check()-style validation
 * of the cached entry).
 */
static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		const struct rt6_info *rt = (const struct rt6_info *)dst;

		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
		inet6_sk(sk)->rx_dst_cookie = rt6_get_cookie(rt);
	}
}
103
/* Derive the initial sequence number (returned) and timestamp offset
 * (stored via @tsoff) for an incoming IPv6 SYN, from the flow's
 * address/port 4-tuple.  Note the argument order: the packet's daddr
 * is the local side and saddr the remote side of the new connection.
 */
static u32 tcp_v6_init_seq_and_tsoff(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcpv6_seq_and_tsoff(ipv6_hdr(skb)->daddr.s6_addr32,
					  ipv6_hdr(skb)->saddr.s6_addr32,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source, tsoff);
}
111
/* connect() handler for IPv6 TCP sockets.
 *
 * Validates the destination address, handles the BSD "connect to
 * INADDR_ANY means loopback" convention, redirects v4-mapped
 * destinations to tcp_v4_connect(), routes the flow, picks/records the
 * source address, chooses the initial sequence number (unless the
 * socket is in repair mode) and kicks off the SYN via tcp_connect(),
 * with a Fast Open deferral path.
 *
 * Returns 0 on success or a negative errno; on failure the socket's
 * dport and route capabilities are cleared.
 */
static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
			  int addr_len)
{
	struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct in6_addr *saddr = NULL, *final_p, final;
	struct ipv6_txoptions *opt;
	struct flowi6 fl6;
	struct dst_entry *dst;
	int addr_type;
	u32 seq;
	int err;
	struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (usin->sin6_family != AF_INET6)
		return -EAFNOSUPPORT;

	memset(&fl6, 0, sizeof(fl6));

	if (np->sndflow) {
		fl6.flowlabel = usin->sin6_flowinfo&IPV6_FLOWINFO_MASK;
		IP6_ECN_flow_init(fl6.flowlabel);
		if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
			struct ip6_flowlabel *flowlabel;
			/* A non-zero flow label must refer to a label the
			 * socket actually holds; only existence is checked.
			 */
			flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
			if (!flowlabel)
				return -EINVAL;
			fl6_sock_release(flowlabel);
		}
	}

	/*
	 *	connect() to INADDR_ANY means loopback (BSD'ism).
	 */

	if (ipv6_addr_any(&usin->sin6_addr)) {
		if (ipv6_addr_v4mapped(&sk->sk_v6_rcv_saddr))
			ipv6_addr_set_v4mapped(htonl(INADDR_LOOPBACK),
					       &usin->sin6_addr);
		else
			usin->sin6_addr = in6addr_loopback;
	}

	addr_type = ipv6_addr_type(&usin->sin6_addr);

	if (addr_type & IPV6_ADDR_MULTICAST)
		return -ENETUNREACH;

	if (addr_type&IPV6_ADDR_LINKLOCAL) {
		if (addr_len >= sizeof(struct sockaddr_in6) &&
		    usin->sin6_scope_id) {
			/* If interface is set while binding, indices
			 * must coincide.
			 */
			if (sk->sk_bound_dev_if &&
			    sk->sk_bound_dev_if != usin->sin6_scope_id)
				return -EINVAL;

			sk->sk_bound_dev_if = usin->sin6_scope_id;
		}

		/* Connect to link-local address requires an interface */
		if (!sk->sk_bound_dev_if)
			return -EINVAL;
	}

	/* Reusing the socket for a different peer: forget stale
	 * timestamp state so PAWS does not reject the new connection.
	 */
	if (tp->rx_opt.ts_recent_stamp &&
	    !ipv6_addr_equal(&sk->sk_v6_daddr, &usin->sin6_addr)) {
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		tp->write_seq = 0;
	}

	sk->sk_v6_daddr = usin->sin6_addr;
	np->flow_label = fl6.flowlabel;

	/*
	 *	TCP over IPv4
	 */

	if (addr_type & IPV6_ADDR_MAPPED) {
		u32 exthdrlen = icsk->icsk_ext_hdr_len;
		struct sockaddr_in sin;

		SOCK_DEBUG(sk, "connect: ipv4 mapped\n");

		if (__ipv6_only_sock(sk))
			return -ENETUNREACH;

		sin.sin_family = AF_INET;
		sin.sin_port = usin->sin6_port;
		sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];

		/* Flip the socket's af_ops to the v4-mapped variants
		 * before delegating to the IPv4 connect path.
		 */
		icsk->icsk_af_ops = &ipv6_mapped;
		sk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		tp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));

		if (err) {
			/* Roll back to the native IPv6 ops on failure. */
			icsk->icsk_ext_hdr_len = exthdrlen;
			icsk->icsk_af_ops = &ipv6_specific;
			sk->sk_backlog_rcv = tcp_v6_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
			tp->af_specific = &tcp_sock_ipv6_specific;
#endif
			goto failure;
		}
		np->saddr = sk->sk_v6_rcv_saddr;

		return err;
	}

	if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr))
		saddr = &sk->sk_v6_rcv_saddr;

	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.daddr = sk->sk_v6_daddr;
	fl6.saddr = saddr ? *saddr : np->saddr;
	fl6.flowi6_oif = sk->sk_bound_dev_if;
	fl6.flowi6_mark = sk->sk_mark;
	fl6.fl6_dport = usin->sin6_port;
	fl6.fl6_sport = inet->inet_sport;
	fl6.flowi6_uid = sk->sk_uid;

	/* Socket lock is held here, so np->opt cannot change under us. */
	opt = rcu_dereference_protected(np->opt, lockdep_sock_is_held(sk));
	final_p = fl6_update_dst(&fl6, opt, &final);

	security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));

	dst = ip6_dst_lookup_flow(sk, &fl6, final_p);
	if (IS_ERR(dst)) {
		err = PTR_ERR(dst);
		goto failure;
	}

	if (!saddr) {
		/* Source address was unbound: adopt the one route
		 * selection chose.
		 */
		saddr = &fl6.saddr;
		sk->sk_v6_rcv_saddr = *saddr;
	}

	/* set the source address */
	np->saddr = *saddr;
	inet->inet_rcv_saddr = LOOPBACK4_IPV6;

	sk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(sk, dst, NULL, NULL);

	icsk->icsk_ext_hdr_len = 0;
	if (opt)
		icsk->icsk_ext_hdr_len = opt->opt_flen +
					 opt->opt_nflen;

	tp->rx_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);

	inet->inet_dport = usin->sin6_port;

	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet6_hash_connect(tcp_death_row, sk);
	if (err)
		goto late_failure;

	sk_set_txhash(sk);

	if (likely(!tp->repair)) {
		/* Repair mode supplies its own sequence state; otherwise
		 * derive ISN and timestamp offset from the 4-tuple.
		 */
		seq = secure_tcpv6_seq_and_tsoff(np->saddr.s6_addr32,
						 sk->sk_v6_daddr.s6_addr32,
						 inet->inet_sport,
						 inet->inet_dport,
						 &tp->tsoffset);
		if (!tp->write_seq)
			tp->write_seq = seq;
	}

	/* TCP Fast Open may defer the actual SYN until sendmsg(). */
	if (tcp_fastopen_defer_connect(sk, &err))
		return err;
	if (err)
		goto late_failure;

	err = tcp_connect(sk);
	if (err)
		goto late_failure;

	return 0;

late_failure:
	tcp_set_state(sk, TCP_CLOSE);
failure:
	inet->inet_dport = 0;
	sk->sk_route_caps = 0;
	return err;
}
312
/* React to a stored PMTU update (tp->mtu_info, set by tcp_v6_err()).
 *
 * No-op for LISTEN/CLOSE sockets.  Otherwise refresh the cached route
 * for the new path MTU and, if our current MSS estimate exceeds it,
 * shrink the MSS and retransmit outstanding data so it fits.
 */
static void tcp_v6_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;

	if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))
		return;

	dst = inet6_csk_update_pmtu(sk, tcp_sk(sk)->mtu_info);
	if (!dst)
		return;

	if (inet_csk(sk)->icsk_pmtu_cookie > dst_mtu(dst)) {
		tcp_sync_mss(sk, dst_mtu(dst));
		tcp_simple_retransmit(sk);
	}
}
329
/* ICMPv6 error handler for TCP.
 *
 * @skb holds the ICMPv6 message; the embedded original IPv6/TCP headers
 * start at skb->data (+ @offset for the TCP header).  Looks up the
 * matching established socket, validates the quoted sequence number
 * against the send window, then dispatches on @type: NDISC redirects
 * update the cached route, PKT_TOOBIG stores the new MTU (deferring the
 * MSS update if the socket is user-locked), and other errors are
 * delivered to the socket per its state (connection-fatal during
 * SYN_SENT/SYN_RECV, soft/errqueue otherwise).
 */
static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
		       u8 type, u8 code, int offset, __be32 info)
{
	const struct ipv6hdr *hdr = (const struct ipv6hdr *)skb->data;
	const struct tcphdr *th = (struct tcphdr *)(skb->data+offset);
	struct net *net = dev_net(skb->dev);
	struct request_sock *fastopen;
	struct ipv6_pinfo *np;
	struct tcp_sock *tp;
	__u32 seq, snd_una;
	struct sock *sk;
	bool fatal;
	int err;

	sk = __inet6_lookup_established(net, &tcp_hashinfo,
					&hdr->daddr, th->dest,
					&hdr->saddr, ntohs(th->source),
					skb->dev->ifindex);

	if (!sk) {
		__ICMP6_INC_STATS(net, __in6_dev_get(skb->dev),
				  ICMP6_MIB_INERRORS);
		return;
	}

	if (sk->sk_state == TCP_TIME_WAIT) {
		inet_twsk_put(inet_twsk(sk));
		return;
	}
	seq = ntohl(th->seq);
	fatal = icmpv6_err_convert(type, code, &err);
	/* Request sockets are handled (and ref-dropped) by tcp_req_err(). */
	if (sk->sk_state == TCP_NEW_SYN_RECV)
		return tcp_req_err(sk, seq, fatal);

	bh_lock_sock(sk);
	/* PKT_TOOBIG is processed even when user-locked (deferred below);
	 * other ICMP types are dropped if the user owns the socket.
	 */
	if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
		__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);

	if (sk->sk_state == TCP_CLOSE)
		goto out;

	/* Enforce IPV6_MINHOPCOUNT (GTSM-style) on the quoted header. */
	if (ipv6_hdr(skb)->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto out;
	}

	tp = tcp_sk(sk);
	/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
	fastopen = tp->fastopen_rsk;
	snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
	if (sk->sk_state != TCP_LISTEN &&
	    !between(seq, snd_una, tp->snd_nxt)) {
		__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
		goto out;
	}

	np = inet6_sk(sk);

	if (type == NDISC_REDIRECT) {
		if (!sock_owned_by_user(sk)) {
			struct dst_entry *dst = __sk_dst_check(sk, np->dst_cookie);

			if (dst)
				dst->ops->redirect(dst, sk, skb);
		}
		goto out;
	}

	if (type == ICMPV6_PKT_TOOBIG) {
		/* We are not interested in TCP_LISTEN and open_requests
		 * (SYN-ACKs send out by Linux are always <576bytes so
		 * they should go through unfragmented).
		 */
		if (sk->sk_state == TCP_LISTEN)
			goto out;

		if (!ip6_sk_accept_pmtu(sk))
			goto out;

		tp->mtu_info = ntohl(info);
		if (!sock_owned_by_user(sk))
			tcp_v6_mtu_reduced(sk);
		else if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED,
					   &sk->sk_tsq_flags))
			/* Deferred to release_sock(); hold a ref until then. */
			sock_hold(sk);
		goto out;
	}


	/* Might be for an request_sock */
	switch (sk->sk_state) {
	case TCP_SYN_SENT:
	case TCP_SYN_RECV:
		/* Only in fast or simultaneous open. If a fast open socket is
		 * is already accepted it is treated as a connected one below.
		 */
		if (fastopen && !fastopen->sk)
			break;

		if (!sock_owned_by_user(sk)) {
			sk->sk_err = err;
			sk->sk_error_report(sk);	/* Wake people up to see the error (see connect in sock.c) */

			tcp_done(sk);
		} else
			sk->sk_err_soft = err;
		goto out;
	}

	if (!sock_owned_by_user(sk) && np->recverr) {
		sk->sk_err = err;
		sk->sk_error_report(sk);
	} else
		sk->sk_err_soft = err;

out:
	bh_unlock_sock(sk);
	sock_put(sk);
}
449
450
/* Build and transmit a SYN-ACK for @req over IPv6.
 *
 * If @dst is NULL a route is looked up from the request first.  IPv6
 * tx options come from the request's saved options, falling back to the
 * socket's (under RCU).  If np->repflow is set and the original SYN was
 * saved, its flow label is echoed back.  Returns 0 / net_xmit code on
 * (attempted) transmission, -ENOMEM if no skb or route.
 */
static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
			      struct flowi *fl,
			      struct request_sock *req,
			      struct tcp_fastopen_cookie *foc,
			      enum tcp_synack_type synack_type)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct flowi6 *fl6 = &fl->u.ip6;
	struct sk_buff *skb;
	int err = -ENOMEM;

	/* First, grab a route. */
	if (!dst && (dst = inet6_csk_route_req(sk, fl6, req,
					       IPPROTO_TCP)) == NULL)
		goto done;

	skb = tcp_make_synack(sk, dst, req, foc, synack_type);

	if (skb) {
		__tcp_v6_send_check(skb, &ireq->ir_v6_loc_addr,
				    &ireq->ir_v6_rmt_addr);

		fl6->daddr = ireq->ir_v6_rmt_addr;
		if (np->repflow && ireq->pktopts)
			fl6->flowlabel = ip6_flowlabel(ipv6_hdr(ireq->pktopts));

		rcu_read_lock();
		opt = ireq->ipv6_opt;
		if (!opt)
			opt = rcu_dereference(np->opt);
		err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
		rcu_read_unlock();
		err = net_xmit_eval(err);
	}

done:
	return err;
}
491
Octavian Purdila72659ec2010-01-17 19:09:39 -0800492
/* Free the IPv6-specific state attached to a request sock: the saved
 * tx options and the pktoptions skb (both may be NULL).
 */
static void tcp_v6_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->ipv6_opt);
	kfree_skb(inet_rsk(req)->pktopts);
}
498
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800499#ifdef CONFIG_TCP_MD5SIG
/* Look up the MD5 signing key configured on @sk for the IPv6 peer
 * @addr, or NULL if none; thin AF_INET6 wrapper over tcp_md5_do_lookup().
 */
static struct tcp_md5sig_key *tcp_v6_md5_do_lookup(const struct sock *sk,
						   const struct in6_addr *addr)
{
	return tcp_md5_do_lookup(sk, (union tcp_md5_addr *)addr, AF_INET6);
}
505
/* MD5 key lookup keyed by a peer socket: uses @addr_sk's IPv6
 * destination address as the lookup key on @sk.
 */
static struct tcp_md5sig_key *tcp_v6_md5_lookup(const struct sock *sk,
						const struct sock *addr_sk)
{
	return tcp_v6_md5_do_lookup(sk, &addr_sk->sk_v6_daddr);
}
511
/* setsockopt(TCP_MD5SIG) handler for IPv6 sockets.
 *
 * Copies a struct tcp_md5sig from userspace; a zero key length deletes
 * the key for the given peer, otherwise the key is added/replaced.
 * V4-mapped addresses are stored under AF_INET using the embedded IPv4
 * address so they match traffic arriving over the v4 path.
 * Returns 0 or -EINVAL/-EFAULT.
 */
static int tcp_v6_parse_md5_keys(struct sock *sk, char __user *optval,
				 int optlen)
{
	struct tcp_md5sig cmd;
	struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&cmd.tcpm_addr;

	if (optlen < sizeof(cmd))
		return -EINVAL;

	if (copy_from_user(&cmd, optval, sizeof(cmd)))
		return -EFAULT;

	if (sin6->sin6_family != AF_INET6)
		return -EINVAL;

	if (!cmd.tcpm_keylen) {
		/* Zero key length == delete the key for this peer. */
		if (ipv6_addr_v4mapped(&sin6->sin6_addr))
			return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
					      AF_INET);
		return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
				      AF_INET6);
	}

	if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
		return -EINVAL;

	if (ipv6_addr_v4mapped(&sin6->sin6_addr))
		return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr.s6_addr32[3],
				      AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);

	return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin6->sin6_addr,
			      AF_INET6, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL);
}
545
/* Feed the RFC 2385 MD5 header material into the hash request:
 * the IPv6 pseudo-header (saddr, daddr, proto, @nbytes length) followed
 * by a copy of the TCP header with its checksum field zeroed, both
 * assembled in the per-cpu pool's scratch buffer.  Returns the
 * crypto_ahash_update() result (0 on success).
 */
static int tcp_v6_md5_hash_headers(struct tcp_md5sig_pool *hp,
				   const struct in6_addr *daddr,
				   const struct in6_addr *saddr,
				   const struct tcphdr *th, int nbytes)
{
	struct tcp6_pseudohdr *bp;
	struct scatterlist sg;
	struct tcphdr *_th;

	bp = hp->scratch;
	/* 1. TCP pseudo-header (RFC2460) */
	bp->saddr = *saddr;
	bp->daddr = *daddr;
	bp->protocol = cpu_to_be32(IPPROTO_TCP);
	bp->len = cpu_to_be32(nbytes);

	/* 2. TCP header with check zeroed, appended after the pseudo-header
	 * so a single scatterlist entry covers both.
	 */
	_th = (struct tcphdr *)(bp + 1);
	memcpy(_th, th, sizeof(*th));
	_th->check = 0;

	sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
	ahash_request_set_crypt(hp->md5_req, &sg, NULL,
				sizeof(*bp) + sizeof(*th));
	return crypto_ahash_update(hp->md5_req);
}
David S. Millerc7da57a2007-10-26 00:41:21 -0700571
/* Compute the TCP-MD5 digest over pseudo-header + TCP header (doff<<2
 * bytes of header material, no payload) with @key, writing 16 bytes to
 * @md5_hash.  Returns 0 on success; on any crypto failure returns 1
 * with @md5_hash zeroed.  Uses the per-cpu MD5 pool, so it must not be
 * preempted between get/put.
 */
static int tcp_v6_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       const struct in6_addr *daddr, struct in6_addr *saddr,
			       const struct tcphdr *th)
{
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;
	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
603
/* Compute the TCP-MD5 digest for a full segment (@skb) with @key into
 * @md5_hash: pseudo-header, TCP header, then the segment payload, then
 * the key, per RFC 2385.
 *
 * When @sk is non-NULL (established or request socket) the addresses
 * come from the socket; otherwise they are taken from the skb's IPv6
 * header.  Returns 0 on success; 1 with @md5_hash zeroed on failure.
 */
static int tcp_v6_md5_hash_skb(char *md5_hash,
			       const struct tcp_md5sig_key *key,
			       const struct sock *sk,
			       const struct sk_buff *skb)
{
	const struct in6_addr *saddr, *daddr;
	struct tcp_md5sig_pool *hp;
	struct ahash_request *req;
	const struct tcphdr *th = tcp_hdr(skb);

	if (sk) { /* valid for establish/request sockets */
		saddr = &sk->sk_v6_rcv_saddr;
		daddr = &sk->sk_v6_daddr;
	} else {
		const struct ipv6hdr *ip6h = ipv6_hdr(skb);
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
	}

	hp = tcp_get_md5sig_pool();
	if (!hp)
		goto clear_hash_noput;
	req = hp->md5_req;

	if (crypto_ahash_init(req))
		goto clear_hash;

	if (tcp_v6_md5_hash_headers(hp, daddr, saddr, th, skb->len))
		goto clear_hash;
	/* Hash everything after the TCP header (options + payload). */
	if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
		goto clear_hash;
	if (tcp_md5_hash_key(hp, key))
		goto clear_hash;
	ahash_request_set_crypt(req, NULL, md5_hash, 0);
	if (crypto_ahash_final(req))
		goto clear_hash;

	tcp_put_md5sig_pool();
	return 0;

clear_hash:
	tcp_put_md5sig_pool();
clear_hash_noput:
	memset(md5_hash, 0, 16);
	return 1;
}
650
Eric Dumazetba8e2752015-10-02 11:43:28 -0700651#endif
652
653static bool tcp_v6_inbound_md5_hash(const struct sock *sk,
654 const struct sk_buff *skb)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800655{
Eric Dumazetba8e2752015-10-02 11:43:28 -0700656#ifdef CONFIG_TCP_MD5SIG
Eric Dumazetcf533ea2011-10-21 05:22:42 -0400657 const __u8 *hash_location = NULL;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800658 struct tcp_md5sig_key *hash_expected;
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000659 const struct ipv6hdr *ip6h = ipv6_hdr(skb);
Eric Dumazet318cf7a2011-10-24 02:46:04 -0400660 const struct tcphdr *th = tcp_hdr(skb);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800661 int genhash;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800662 u8 newhash[16];
663
664 hash_expected = tcp_v6_md5_do_lookup(sk, &ip6h->saddr);
YOSHIFUJI Hideaki7d5d5522008-04-17 12:29:53 +0900665 hash_location = tcp_parse_md5sig_option(th);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800666
David S. Miller785957d2008-07-30 03:03:15 -0700667 /* We've parsed the options - do we have a hash? */
668 if (!hash_expected && !hash_location)
Eric Dumazetff74e232015-03-24 15:58:54 -0700669 return false;
David S. Miller785957d2008-07-30 03:03:15 -0700670
671 if (hash_expected && !hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -0700672 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
Eric Dumazetff74e232015-03-24 15:58:54 -0700673 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800674 }
675
David S. Miller785957d2008-07-30 03:03:15 -0700676 if (!hash_expected && hash_location) {
Eric Dumazetc10d9312016-04-29 14:16:47 -0700677 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
Eric Dumazetff74e232015-03-24 15:58:54 -0700678 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800679 }
680
681 /* check the signature */
Adam Langley49a72df2008-07-19 00:01:42 -0700682 genhash = tcp_v6_md5_hash_skb(newhash,
683 hash_expected,
Eric Dumazet39f8e582015-03-24 15:58:55 -0700684 NULL, skb);
Adam Langley49a72df2008-07-19 00:01:42 -0700685
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800686 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
Eric Dumazet72145a62016-08-24 09:01:23 -0700687 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
Joe Perchese87cc472012-05-13 21:56:26 +0000688 net_info_ratelimited("MD5 Hash %s for [%pI6c]:%u->[%pI6c]:%u\n",
689 genhash ? "failed" : "mismatch",
690 &ip6h->saddr, ntohs(th->source),
691 &ip6h->daddr, ntohs(th->dest));
Eric Dumazetff74e232015-03-24 15:58:54 -0700692 return true;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800693 }
Eric Dumazetba8e2752015-10-02 11:43:28 -0700694#endif
Eric Dumazetff74e232015-03-24 15:58:54 -0700695 return false;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800696}
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800697
/* Fill the IPv6-specific fields of a freshly minted request sock from the
 * incoming SYN's headers, and stash the SYN skb when the listener wants
 * to see its IPv6 ancillary data (IPV6_PKTOPTIONS) later.
 */
static void tcp_v6_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct ipv6_pinfo *np = inet6_sk(sk_listener);

	ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
	ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;

	/* So that link locals have meaning */
	if (!sk_listener->sk_bound_dev_if &&
	    ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
		ireq->ir_iif = tcp_v6_iif(skb);

	/* Keep a reference on the SYN skb if any rxopt/flow-label option is
	 * enabled (skipped for TIME_WAIT recycled ISNs); consumed later in
	 * tcp_v6_syn_recv_sock() when the child socket is created.
	 */
	if (!TCP_SKB_CB(skb)->tcp_tw_isn &&
	    (ipv6_opt_accepted(sk_listener, skb, &TCP_SKB_CB(skb)->header.h6) ||
	     np->rxopt.bits.rxinfo ||
	     np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
	     np->rxopt.bits.rxohlim || np->repflow)) {
		atomic_inc(&skb->users);
		ireq->pktopts = skb;
	}
}
722
Eric Dumazetf9646292015-09-29 07:42:50 -0700723static struct dst_entry *tcp_v6_route_req(const struct sock *sk,
724 struct flowi *fl,
Soheil Hassas Yeganeh4396e462017-03-15 16:30:46 -0400725 const struct request_sock *req)
Octavian Purdilad94e0412014-06-25 17:09:55 +0300726{
Eric Dumazetf76b33c2015-09-29 07:42:42 -0700727 return inet6_csk_route_req(sk, &fl->u.ip6, req, IPPROTO_TCP);
Octavian Purdilad94e0412014-06-25 17:09:55 +0300728}
729
/* Request-sock operations for IPv6 listeners; the IPv6 counterpart of
 * tcp_request_sock_ops.  Not static: also referenced by the SYN-cookie
 * code (tcp6_request_sock_ops is looked up from cookie_v6_check()).
 */
struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
	.family		=	AF_INET6,
	.obj_size	=	sizeof(struct tcp6_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v6_reqsk_send_ack,
	.destructor	=	tcp_v6_reqsk_destructor,
	.send_reset	=	tcp_v6_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};
739
/* Address-family specific hooks consumed by the generic tcp_conn_request()
 * path for IPv6 SYNs.  mss_clamp accounts for the larger IPv6 header.
 */
static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
	.mss_clamp	=	IPV6_MIN_MTU - sizeof(struct tcphdr) -
				sizeof(struct ipv6hdr),
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
#endif
	.init_req	=	tcp_v6_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v6_init_sequence,
#endif
	.route_req	=	tcp_v6_route_req,
	.init_seq_tsoff	=	tcp_v6_init_seq_and_tsoff,
	.send_synack	=	tcp_v6_send_synack,
};
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800755
/* Build and transmit a stand-alone control segment (RST when @rst != 0,
 * otherwise a bare ACK) in reply to @skb, using the per-namespace IPv6
 * control socket.  @sk may be NULL (e.g. RST for a segment that matched
 * no socket); addresses and the reply route are then derived from @skb.
 * @tsval/@tsecr: timestamp option values; option emitted only if @tsecr.
 * @key: optional TCP-MD5 key to sign the reply with.
 */
static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32 seq,
				 u32 ack, u32 win, u32 tsval, u32 tsecr,
				 int oif, struct tcp_md5sig_key *key, int rst,
				 u8 tclass, __be32 label)
{
	const struct tcphdr *th = tcp_hdr(skb);
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct flowi6 fl6;
	struct net *net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
	struct sock *ctl_sk = net->ipv6.tcp_sk;
	unsigned int tot_len = sizeof(struct tcphdr);
	struct dst_entry *dst;
	__be32 *topt;

	/* Header length grows with each option we will emit below. */
	if (tsecr)
		tot_len += TCPOLEN_TSTAMP_ALIGNED;
#ifdef CONFIG_TCP_MD5SIG
	if (key)
		tot_len += TCPOLEN_MD5SIG_ALIGNED;
#endif

	buff = alloc_skb(MAX_HEADER + sizeof(struct ipv6hdr) + tot_len,
			 GFP_ATOMIC);
	if (!buff)
		return;

	skb_reserve(buff, MAX_HEADER + sizeof(struct ipv6hdr) + tot_len);

	t1 = (struct tcphdr *) skb_push(buff, tot_len);
	skb_reset_transport_header(buff);

	/* Swap the send and the receive. */
	memset(t1, 0, sizeof(*t1));
	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = tot_len / 4;
	t1->seq = htonl(seq);
	t1->ack_seq = htonl(ack);
	/* A RST answering a non-ACK segment carries no ACK flag. */
	t1->ack = !rst || !th->ack;
	t1->rst = rst;
	t1->window = htons(win);

	/* Options area starts right after the fixed TCP header. */
	topt = (__be32 *)(t1 + 1);

	if (tsecr) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*topt++ = htonl(tsval);
		*topt++ = htonl(tsecr);
	}

#ifdef CONFIG_TCP_MD5SIG
	if (key) {
		*topt++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
				(TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG);
		/* Sign with the reply's (swapped) address order. */
		tcp_v6_md5_hash_hdr((__u8 *)topt, key,
				    &ipv6_hdr(skb)->saddr,
				    &ipv6_hdr(skb)->daddr, t1);
	}
#endif

	/* Reply flow: mirror of the incoming packet's addresses. */
	memset(&fl6, 0, sizeof(fl6));
	fl6.daddr = ipv6_hdr(skb)->saddr;
	fl6.saddr = ipv6_hdr(skb)->daddr;
	fl6.flowlabel = label;

	buff->ip_summed = CHECKSUM_PARTIAL;
	buff->csum = 0;

	__tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr);

	fl6.flowi6_proto = IPPROTO_TCP;
	/* Link-local destinations need an explicit output interface. */
	if (rt6_need_strict(&fl6.daddr) && !oif)
		fl6.flowi6_oif = tcp_v6_iif(skb);
	else {
		if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
			oif = skb->skb_iif;

		fl6.flowi6_oif = oif;
	}

	fl6.flowi6_mark = IP6_REPLY_MARK(net, skb->mark);
	fl6.fl6_dport = t1->dest;
	fl6.fl6_sport = t1->source;
	fl6.flowi6_uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
	security_skb_classify_flow(skb, flowi6_to_flowi(&fl6));

	/* Pass the control socket to ip6_dst_lookup_flow() even for a RST;
	 * the underlying function uses it to retrieve the network namespace.
	 */
	dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
	if (!IS_ERR(dst)) {
		skb_dst_set(buff, dst);
		ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
		TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
		if (rst)
			TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
		return;
	}

	/* No route: drop the reply we just built. */
	kfree_skb(buff);
}
860
/* Send a RST in answer to @skb.  @sk may be NULL when no socket matched.
 * Never resets a RST (RFC 793), and for socketless replies only answers
 * unicast destinations.  With TCP-MD5 enabled the reply is signed when a
 * key can be found, including a fallback listener lookup for the case
 * where the active side's socket is already gone.
 */
static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
{
	const struct tcphdr *th = tcp_hdr(skb);
	u32 seq = 0, ack_seq = 0;
	struct tcp_md5sig_key *key = NULL;
#ifdef CONFIG_TCP_MD5SIG
	const __u8 *hash_location = NULL;
	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	unsigned char newhash[16];
	int genhash;
	struct sock *sk1 = NULL;
#endif
	int oif;

	if (th->rst)
		return;

	/* If sk not NULL, it means we did a successful lookup and incoming
	 * route had to be correct. prequeue might have dropped our dst.
	 */
	if (!sk && !ipv6_unicast_destination(skb))
		return;

#ifdef CONFIG_TCP_MD5SIG
	/* RCU section protects the MD5 key (and sk1) until after send. */
	rcu_read_lock();
	hash_location = tcp_parse_md5sig_option(th);
	if (sk && sk_fullsock(sk)) {
		key = tcp_v6_md5_do_lookup(sk, &ipv6h->saddr);
	} else if (hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * we are not loose security here:
		 * Incoming packet is checked with md5 hash with finding key,
		 * no RST generated if md5 hash doesn't match.
		 */
		sk1 = inet6_lookup_listener(dev_net(skb_dst(skb)->dev),
					   &tcp_hashinfo, NULL, 0,
					   &ipv6h->saddr,
					   th->source, &ipv6h->daddr,
					   ntohs(th->source), tcp_v6_iif(skb));
		if (!sk1)
			goto out;

		key = tcp_v6_md5_do_lookup(sk1, &ipv6h->saddr);
		if (!key)
			goto out;

		/* Verify the incoming segment before answering it. */
		genhash = tcp_v6_md5_hash_skb(newhash, key, NULL, skb);
		if (genhash || memcmp(hash_location, newhash, 16) != 0)
			goto out;
	}
#endif

	/* RFC 793: echo the peer's ACK as our SEQ, or ACK its whole
	 * segment (SYN/FIN each count for one) when it carried no ACK.
	 */
	if (th->ack)
		seq = ntohl(th->ack_seq);
	else
		ack_seq = ntohl(th->seq) + th->syn + th->fin + skb->len -
			  (th->doff << 2);

	oif = sk ? sk->sk_bound_dev_if : 0;
	tcp_v6_send_response(sk, skb, seq, ack_seq, 0, 0, 0, oif, key, 1, 0, 0);

#ifdef CONFIG_TCP_MD5SIG
out:
	rcu_read_unlock();
#endif
}
929
Eric Dumazeta00e7442015-09-29 07:42:39 -0700930static void tcp_v6_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq,
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800931 u32 ack, u32 win, u32 tsval, u32 tsecr, int oif,
Florent Fourcot1d13a962014-01-16 17:21:22 +0100932 struct tcp_md5sig_key *key, u8 tclass,
Hannes Frederic Sowa5119bd12016-06-11 20:41:38 +0200933 __be32 label)
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700934{
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800935 tcp_v6_send_response(sk, skb, seq, ack, win, tsval, tsecr, oif, key, 0,
936 tclass, label);
Ilpo Järvinen626e2642008-10-09 14:42:40 -0700937}
938
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
940{
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700941 struct inet_timewait_sock *tw = inet_twsk(sk);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -0800942 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800944 tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700945 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
Andrey Vaginee684b62013-02-11 05:50:19 +0000946 tcp_time_stamp + tcptw->tw_ts_offset,
Wang Yufen9c76a112014-03-29 09:27:31 +0800947 tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
Florent Fourcot21858cd2015-05-16 00:24:59 +0200948 tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700949
Arnaldo Carvalho de Melo8feaf0c02005-08-09 20:09:30 -0700950 inet_twsk_put(tw);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951}
952
Eric Dumazeta00e7442015-09-29 07:42:39 -0700953static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
Gui Jianfeng6edafaa2008-08-06 23:50:04 -0700954 struct request_sock *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955{
Daniel Lee3a19ce02014-05-11 20:22:13 -0700956 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
957 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
958 */
Eric Dumazet20a2b492016-08-22 11:31:10 -0700959 /* RFC 7323 2.3
960 * The window field (SEG.WND) of every outgoing segment, with the
961 * exception of <SYN> segments, MUST be right-shifted by
962 * Rcv.Wind.Shift bits:
963 */
Eric Dumazet0f85fea2014-12-09 09:56:08 -0800964 tcp_v6_send_ack(sk, skb, (sk->sk_state == TCP_LISTEN) ?
Daniel Lee3a19ce02014-05-11 20:22:13 -0700965 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
Eric Dumazet20a2b492016-08-22 11:31:10 -0700966 tcp_rsk(req)->rcv_nxt,
967 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
Florian Westphal95a22ca2016-12-01 11:32:06 +0100968 tcp_time_stamp + tcp_rsk(req)->ts_off,
969 req->ts_recent, sk->sk_bound_dev_if,
Florent Fourcot1d13a962014-01-16 17:21:22 +0100970 tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
971 0, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700972}
973
974
/* With SYN cookies compiled in, a non-SYN segment hitting a listener may
 * actually be the ACK completing a cookie handshake: let cookie_v6_check()
 * either mint the child socket or hand back @sk.  Without SYN cookies
 * (or for SYN segments) the listener is returned unchanged.
 */
static struct sock *tcp_v6_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	if (!tcp_hdr(skb)->syn)
		return cookie_v6_check(sk, skb);
#endif
	return sk;
}
985
Linus Torvalds1da177e2005-04-16 15:20:36 -0700986static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
987{
Linus Torvalds1da177e2005-04-16 15:20:36 -0700988 if (skb->protocol == htons(ETH_P_IP))
989 return tcp_v4_conn_request(sk, skb);
990
991 if (!ipv6_unicast_destination(skb))
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900992 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993
Octavian Purdila1fb6f152014-06-25 17:10:02 +0300994 return tcp_conn_request(&tcp6_request_sock_ops,
995 &tcp_request_sock_ipv6_ops, sk, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996
Linus Torvalds1da177e2005-04-16 15:20:36 -0700997drop:
Eric Dumazet9caad862016-04-01 08:52:20 -0700998 tcp_listendrop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700999 return 0; /* don't send reset */
1000}
1001
/* Undo tcp_v6_fill_cb()'s relocation of the IPv6 control-block data. */
static void tcp_v6_restore_cb(struct sk_buff *skb)
{
	/* We need to move header back to the beginning if xfrm6_policy_check()
	 * and tcp_v6_fill_cb() are going to be called again.
	 * ip6_datagram_recv_specific_ctl() also expects IP6CB to be there.
	 */
	memmove(IP6CB(skb), &TCP_SKB_CB(skb)->header.h6,
		sizeof(struct inet6_skb_parm));
}
1011
/* Create the child socket completing a three-way handshake on listener
 * @sk.  Called with the listener locked (or lockless for TCP_LISTEN, see
 * tcp_v6_do_rcv()'s comment).  A v4-mapped SYN is delegated to the IPv4
 * path and the resulting child is patched up to look like an IPv6 socket.
 *
 * @dst:        optional pre-resolved route; looked up here when NULL.
 * @req_unhash: request to remove from the ehash when we insert the child.
 * @own_req:    set true when this caller won the race to own @req.
 *
 * Returns the new socket, or NULL on failure (acceptq overflow, routing
 * failure, allocation failure, or port-inheritance failure).
 */
static struct sock *tcp_v6_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
					 struct request_sock *req,
					 struct dst_entry *dst,
					 struct request_sock *req_unhash,
					 bool *own_req)
{
	struct inet_request_sock *ireq;
	struct ipv6_pinfo *newnp;
	const struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6_txoptions *opt;
	struct tcp6_sock *newtcp6sk;
	struct inet_sock *newinet;
	struct tcp_sock *newtp;
	struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *key;
#endif
	struct flowi6 fl6;

	if (skb->protocol == htons(ETH_P_IP)) {
		/*
		 * v4-mapped: an IPv4 SYN arrived on this IPv6 socket.
		 * Let the IPv4 code build the child, then convert it.
		 */

		newsk = tcp_v4_syn_recv_sock(sk, skb, req, dst,
					     req_unhash, own_req);

		if (!newsk)
			return NULL;

		newtcp6sk = (struct tcp6_sock *)newsk;
		inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

		newinet = inet_sk(newsk);
		newnp = inet6_sk(newsk);
		newtp = tcp_sk(newsk);

		memcpy(newnp, np, sizeof(struct ipv6_pinfo));

		newnp->saddr = newsk->sk_v6_rcv_saddr;

		/* Route all further calls through the mapped-v4 ops. */
		inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
		newsk->sk_backlog_rcv = tcp_v4_do_rcv;
#ifdef CONFIG_TCP_MD5SIG
		newtp->af_specific = &tcp_sock_ipv6_mapped_specific;
#endif

		/* Pointers memcpy'd from the listener must not be shared. */
		newnp->ipv6_ac_list = NULL;
		newnp->ipv6_fl_list = NULL;
		newnp->pktoptions = NULL;
		newnp->opt = NULL;
		newnp->mcast_oif = tcp_v6_iif(skb);
		newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
		newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
		if (np->repflow)
			newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

		/*
		 * No need to charge this sock to the relevant IPv6 refcnt debug socks count
		 * here, tcp_create_openreq_child now does this for us, see the comment in
		 * that function for the gory details. -acme
		 */

		/* It is tricky place. Until this moment IPv4 tcp
		   worked with IPv6 icsk.icsk_af_ops.
		   Sync it now.
		 */
		tcp_sync_mss(newsk, inet_csk(newsk)->icsk_pmtu_cookie);

		return newsk;
	}

	ireq = inet_rsk(req);

	if (sk_acceptq_is_full(sk))
		goto out_overflow;

	if (!dst) {
		dst = inet6_csk_route_req(sk, &fl6, req, IPPROTO_TCP);
		if (!dst)
			goto out;
	}

	newsk = tcp_create_openreq_child(sk, req, skb);
	if (!newsk)
		goto out_nonewsk;

	/*
	 * No need to charge this sock to the relevant IPv6 refcnt debug socks
	 * count here, tcp_create_openreq_child now does this for us, see the
	 * comment in that function for the gory details. -acme
	 */

	newsk->sk_gso_type = SKB_GSO_TCPV6;
	ip6_dst_store(newsk, dst, NULL, NULL);
	inet6_sk_rx_dst_set(newsk, skb);

	newtcp6sk = (struct tcp6_sock *)newsk;
	inet_sk(newsk)->pinet6 = &newtcp6sk->inet6;

	newtp = tcp_sk(newsk);
	newinet = inet_sk(newsk);
	newnp = inet6_sk(newsk);

	memcpy(newnp, np, sizeof(struct ipv6_pinfo));

	newsk->sk_v6_daddr = ireq->ir_v6_rmt_addr;
	newnp->saddr = ireq->ir_v6_loc_addr;
	newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
	newsk->sk_bound_dev_if = ireq->ir_iif;

	/* Now IPv6 options...

	   First: no IPv4 options.
	 */
	newinet->inet_opt = NULL;
	newnp->ipv6_ac_list = NULL;
	newnp->ipv6_fl_list = NULL;

	/* Clone RX bits */
	newnp->rxopt.all = np->rxopt.all;

	newnp->pktoptions = NULL;
	newnp->opt = NULL;
	newnp->mcast_oif = tcp_v6_iif(skb);
	newnp->mcast_hops = ipv6_hdr(skb)->hop_limit;
	newnp->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(skb));
	if (np->repflow)
		newnp->flow_label = ip6_flowlabel(ipv6_hdr(skb));

	/* Clone native IPv6 options from listening socket (if any)

	   Yes, keeping reference count would be much more clever,
	   but we make one more one thing there: reattach optmem
	   to newsk.
	 */
	opt = ireq->ipv6_opt;
	if (!opt)
		opt = rcu_dereference(np->opt);
	if (opt) {
		opt = ipv6_dup_options(newsk, opt);
		RCU_INIT_POINTER(newnp->opt, opt);
	}
	inet_csk(newsk)->icsk_ext_hdr_len = 0;
	if (opt)
		inet_csk(newsk)->icsk_ext_hdr_len = opt->opt_nflen +
						    opt->opt_flen;

	tcp_ca_openreq_child(newsk, dst);

	tcp_sync_mss(newsk, dst_mtu(dst));
	newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst));

	tcp_initialize_rcv_mss(newsk);

	/* IPv4 address fields are meaningless on a pure v6 socket. */
	newinet->inet_daddr = newinet->inet_saddr = LOOPBACK4_IPV6;
	newinet->inet_rcv_saddr = LOOPBACK4_IPV6;

#ifdef CONFIG_TCP_MD5SIG
	/* Copy over the MD5 key from the original socket */
	key = tcp_v6_md5_do_lookup(sk, &newsk->sk_v6_daddr);
	if (key) {
		/* We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across. Shucks.
		 */
		tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newsk->sk_v6_daddr,
			       AF_INET6, key->key, key->keylen,
			       sk_gfp_mask(sk, GFP_ATOMIC));
	}
#endif

	if (__inet_inherit_port(sk, newsk) < 0) {
		inet_csk_prepare_forced_close(newsk);
		tcp_done(newsk);
		goto out;
	}
	*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
	if (*own_req) {
		tcp_move_syn(newtp, req);

		/* Clone pktoptions received with SYN, if we own the req */
		if (ireq->pktopts) {
			newnp->pktoptions = skb_clone(ireq->pktopts,
						      sk_gfp_mask(sk, GFP_ATOMIC));
			consume_skb(ireq->pktopts);
			ireq->pktopts = NULL;
			if (newnp->pktoptions) {
				tcp_v6_restore_cb(newnp->pktoptions);
				skb_set_owner_r(newnp->pktoptions, newsk);
			}
		}
	}

	return newsk;

out_overflow:
	__NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
out_nonewsk:
	dst_release(dst);
out:
	tcp_listendrop(sk);
	return NULL;
}
1217
/* The socket must have it's spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct tcp_sock *tp;
	struct sk_buff *opt_skb = NULL;

	/* Imagine: socket is IPv6. IPv4 packet arrives,
	   goes to IPv4 receive handler and backlogged.
	   From backlog it always goes here. Kerboom...
	   Fortunately, tcp_rcv_established and rcv_established
	   handle them correctly, but it is not case with
	   tcp_v6_hnd_req and tcp_v6_send_reset(). --ANK
	 */

	/* v4-mapped traffic on a v6 socket: hand back to the IPv4 path. */
	if (skb->protocol == htons(ETH_P_IP))
		return tcp_v4_do_rcv(sk, skb);

	if (tcp_filter(sk, skb))
		goto discard;

	/*
	 *	socket locking is here for SMP purposes as backlog rcv
	 *	is currently called with bh processing disabled.
	 */

	/* Do Stevens' IPV6_PKTOPTIONS.

	   Yes, guys, it is the only place in our code, where we
	   may make it not affecting IPv4.
	   The rest of code is protocol independent,
	   and I do not like idea to uglify IPv4.

	   Actually, all the idea behind IPV6_PKTOPTIONS
	   looks not very well thought. For now we latch
	   options, received in the last packet, enqueued
	   by tcp. Feel free to propose better solution.
					       --ANK (980728)
	 */
	if (np->rxopt.all)
		opt_skb = skb_clone(skb, sk_gfp_mask(sk, GFP_ATOMIC));

	if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
		struct dst_entry *dst = sk->sk_rx_dst;

		sock_rps_save_rxhash(sk, skb);
		sk_mark_napi_id(sk, skb);
		if (dst) {
			/* Invalidate the cached rx dst if the incoming
			 * interface changed or the route is stale.
			 */
			if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
			    dst->ops->check(dst, np->rx_dst_cookie) == NULL) {
				dst_release(dst);
				sk->sk_rx_dst = NULL;
			}
		}

		tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
		if (opt_skb)
			goto ipv6_pktoptions;
		return 0;
	}

	if (tcp_checksum_complete(skb))
		goto csum_err;

	if (sk->sk_state == TCP_LISTEN) {
		/* May return sk itself, a child socket, or NULL
		 * (e.g. bad syncookie). */
		struct sock *nsk = tcp_v6_cookie_check(sk, skb);

		if (!nsk)
			goto discard;

		if (nsk != sk) {
			sock_rps_save_rxhash(nsk, skb);
			sk_mark_napi_id(nsk, skb);
			if (tcp_child_process(sk, nsk, skb))
				goto reset;
			/* Child owns the skb now; pktoptions latching
			 * only applies to the parent path. */
			if (opt_skb)
				__kfree_skb(opt_skb);
			return 0;
		}
	} else
		sock_rps_save_rxhash(sk, skb);

	if (tcp_rcv_state_process(sk, skb))
		goto reset;
	if (opt_skb)
		goto ipv6_pktoptions;
	return 0;

reset:
	tcp_v6_send_reset(sk, skb);
discard:
	if (opt_skb)
		__kfree_skb(opt_skb);
	kfree_skb(skb);
	return 0;
csum_err:
	TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
	TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
	goto discard;


ipv6_pktoptions:
	/* Do you ask, what is it?

	   1. skb was enqueued by tcp.
	   2. skb is added to tail of read queue, rather than out of order.
	   3. socket is not in passive state.
	   4. Finally, it really contains options, which user wants to receive.
	 */
	tp = tcp_sk(sk);
	if (TCP_SKB_CB(opt_skb)->end_seq == tp->rcv_nxt &&
	    !((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) {
		if (np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo)
			np->mcast_oif = tcp_v6_iif(opt_skb);
		if (np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim)
			np->mcast_hops = ipv6_hdr(opt_skb)->hop_limit;
		if (np->rxopt.bits.rxflow || np->rxopt.bits.rxtclass)
			np->rcv_flowinfo = ip6_flowinfo(ipv6_hdr(opt_skb));
		if (np->repflow)
			np->flow_label = ip6_flowlabel(ipv6_hdr(opt_skb));
		if (ipv6_opt_accepted(sk, opt_skb, &TCP_SKB_CB(opt_skb)->header.h6)) {
			skb_set_owner_r(opt_skb, sk);
			tcp_v6_restore_cb(opt_skb);
			/* Latch: swap in the new options skb, freeing the
			 * previously latched one below. */
			opt_skb = xchg(&np->pktoptions, opt_skb);
		} else {
			__kfree_skb(opt_skb);
			opt_skb = xchg(&np->pktoptions, NULL);
		}
	}

	/* Frees whatever was previously latched (may be NULL). */
	kfree_skb(opt_skb);
	return 0;
}
1358
Nicolas Dichtel2dc49d12014-12-22 18:22:48 +01001359static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
1360 const struct tcphdr *th)
1361{
1362 /* This is tricky: we move IP6CB at its correct location into
1363 * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
1364 * _decode_session6() uses IP6CB().
1365 * barrier() makes sure compiler won't play aliasing games.
1366 */
1367 memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
1368 sizeof(struct inet6_skb_parm));
1369 barrier();
1370
1371 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1372 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1373 skb->len - th->doff*4);
1374 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1375 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1376 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1377 TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
1378 TCP_SKB_CB(skb)->sacked = 0;
1379}
1380
/* Main IPv6 TCP receive entry point (softirq context).  Validates the
 * header, looks up the owning socket, and dispatches to the request-sock,
 * time-wait, listen or established handling paths.  'refcounted' tracks
 * whether the lookup took a reference on sk so the exit paths know
 * whether to sock_put().
 */
static int tcp_v6_rcv(struct sk_buff *skb)
{
	const struct tcphdr *th;
	const struct ipv6hdr *hdr;
	bool refcounted;
	struct sock *sk;
	int ret;
	struct net *net = dev_net(skb->dev);

	if (skb->pkt_type != PACKET_HOST)
		goto discard_it;

	/*
	 *	Count it even if it's bad.
	 */
	__TCP_INC_STATS(net, TCP_MIB_INSEGS);

	if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
		goto discard_it;

	th = (const struct tcphdr *)skb->data;

	if (unlikely(th->doff < sizeof(struct tcphdr)/4))
		goto bad_packet;
	if (!pskb_may_pull(skb, th->doff*4))
		goto discard_it;

	if (skb_checksum_init(skb, IPPROTO_TCP, ip6_compute_pseudo))
		goto csum_error;

	/* Re-read after pskb_may_pull(): skb->data may have moved. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

lookup:
	sk = __inet6_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th),
				th->source, th->dest, inet6_iif(skb),
				&refcounted);
	if (!sk)
		goto no_tcp_socket;

process:
	if (sk->sk_state == TCP_TIME_WAIT)
		goto do_time_wait;

	if (sk->sk_state == TCP_NEW_SYN_RECV) {
		struct request_sock *req = inet_reqsk(sk);
		struct sock *nsk;

		sk = req->rsk_listener;
		tcp_v6_fill_cb(skb, hdr, th);
		if (tcp_v6_inbound_md5_hash(sk, skb)) {
			sk_drops_add(sk, skb);
			reqsk_put(req);
			goto discard_it;
		}
		/* Listener changed state under us; drop the req and redo
		 * the lookup from scratch. */
		if (unlikely(sk->sk_state != TCP_LISTEN)) {
			inet_csk_reqsk_queue_drop_and_put(sk, req);
			goto lookup;
		}
		sock_hold(sk);
		refcounted = true;
		nsk = tcp_check_req(sk, skb, req, false);
		if (!nsk) {
			reqsk_put(req);
			goto discard_and_relse;
		}
		if (nsk == sk) {
			reqsk_put(req);
			tcp_v6_restore_cb(skb);
		} else if (tcp_child_process(sk, nsk, skb)) {
			tcp_v6_send_reset(nsk, skb);
			goto discard_and_relse;
		} else {
			sock_put(sk);
			return 0;
		}
	}
	/* IP_MINTTL-style hop-limit filtering (RFC 5082 GTSM). */
	if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
		__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
		goto discard_and_relse;
	}

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
		goto discard_and_relse;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_v6_inbound_md5_hash(sk, skb))
		goto discard_and_relse;

	if (tcp_filter(sk, skb))
		goto discard_and_relse;
	/* tcp_filter() may have trimmed/reallocated the skb. */
	th = (const struct tcphdr *)skb->data;
	hdr = ipv6_hdr(skb);

	skb->dev = NULL;

	/* Listeners are lockless; no backlog handling needed. */
	if (sk->sk_state == TCP_LISTEN) {
		ret = tcp_v6_do_rcv(sk, skb);
		goto put_and_return;
	}

	sk_incoming_cpu_update(sk);

	bh_lock_sock_nested(sk);
	tcp_segs_in(tcp_sk(sk), skb);
	ret = 0;
	if (!sock_owned_by_user(sk)) {
		if (!tcp_prequeue(sk, skb))
			ret = tcp_v6_do_rcv(sk, skb);
	} else if (tcp_add_backlog(sk, skb)) {
		/* Backlog full: drop while still holding the sk ref. */
		goto discard_and_relse;
	}
	bh_unlock_sock(sk);

put_and_return:
	if (refcounted)
		sock_put(sk);
	return ret ? -1 : 0;

no_tcp_socket:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
		goto discard_it;

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
csum_error:
		__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
		__TCP_INC_STATS(net, TCP_MIB_INERRS);
	} else {
		tcp_v6_send_reset(NULL, skb);
	}

discard_it:
	kfree_skb(skb);
	return 0;

discard_and_relse:
	sk_drops_add(sk, skb);
	if (refcounted)
		sock_put(sk);
	goto discard_it;

do_time_wait:
	if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto discard_it;
	}

	tcp_v6_fill_cb(skb, hdr, th);

	if (tcp_checksum_complete(skb)) {
		inet_twsk_put(inet_twsk(sk));
		goto csum_error;
	}

	switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
	case TCP_TW_SYN:
	{
		/* New SYN on a time-wait port: if a listener exists,
		 * kill the tw sock and restart processing as a fresh
		 * connection attempt (no extra ref taken -> refcounted
		 * is cleared). */
		struct sock *sk2;

		sk2 = inet6_lookup_listener(dev_net(skb->dev), &tcp_hashinfo,
					    skb, __tcp_hdrlen(th),
					    &ipv6_hdr(skb)->saddr, th->source,
					    &ipv6_hdr(skb)->daddr,
					    ntohs(th->dest), tcp_v6_iif(skb));
		if (sk2) {
			struct inet_timewait_sock *tw = inet_twsk(sk);
			inet_twsk_deschedule_put(tw);
			sk = sk2;
			tcp_v6_restore_cb(skb);
			refcounted = false;
			goto process;
		}
		/* Fall through to ACK */
	}
	case TCP_TW_ACK:
		tcp_v6_timewait_ack(sk, skb);
		break;
	case TCP_TW_RST:
		tcp_v6_restore_cb(skb);
		tcp_v6_send_reset(sk, skb);
		inet_twsk_deschedule_put(inet_twsk(sk));
		goto discard_it;
	case TCP_TW_SUCCESS:
		;
	}
	goto discard_it;
}
1572
Eric Dumazetc7109982012-07-26 12:18:11 +00001573static void tcp_v6_early_demux(struct sk_buff *skb)
1574{
1575 const struct ipv6hdr *hdr;
1576 const struct tcphdr *th;
1577 struct sock *sk;
1578
1579 if (skb->pkt_type != PACKET_HOST)
1580 return;
1581
1582 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1583 return;
1584
1585 hdr = ipv6_hdr(skb);
1586 th = tcp_hdr(skb);
1587
1588 if (th->doff < sizeof(struct tcphdr) / 4)
1589 return;
1590
Eric Dumazet870c3152014-10-17 09:17:20 -07001591 /* Note : We use inet6_iif() here, not tcp_v6_iif() */
Eric Dumazetc7109982012-07-26 12:18:11 +00001592 sk = __inet6_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1593 &hdr->saddr, th->source,
1594 &hdr->daddr, ntohs(th->dest),
1595 inet6_iif(skb));
1596 if (sk) {
1597 skb->sk = sk;
1598 skb->destructor = sock_edemux;
Eric Dumazetf7e4eb02015-03-15 21:12:13 -07001599 if (sk_fullsock(sk)) {
Michal Kubečekd0c294c2015-03-23 15:14:00 +01001600 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
Neal Cardwellf3f12132012-10-22 21:41:48 +00001601
Eric Dumazetc7109982012-07-26 12:18:11 +00001602 if (dst)
Eric Dumazet5d299f32012-08-06 05:09:33 +00001603 dst = dst_check(dst, inet6_sk(sk)->rx_dst_cookie);
Eric Dumazetc7109982012-07-26 12:18:11 +00001604 if (dst &&
Neal Cardwellf3f12132012-10-22 21:41:48 +00001605 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
Eric Dumazetc7109982012-07-26 12:18:11 +00001606 skb_dst_set_noref(skb, dst);
1607 }
1608 }
1609}
1610
/* TIME-WAIT socket ops for TCPv6: object size plus the shared TCP
 * uniqueness/destruction helpers. */
static struct timewait_sock_ops tcp6_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp6_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor = tcp_twsk_destructor,
};
1616
/* AF-specific connection-socket ops for native IPv6 TCP sockets. */
static const struct inet_connection_sock_af_ops ipv6_specific = {
	.queue_xmit	   = inet6_csk_xmit,
	.send_check	   = tcp_v6_send_check,
	.rebuild_header	   = inet6_sk_rebuild_header,
	.sk_rx_dst_set	   = inet6_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct ipv6hdr),
	.net_frag_header_len = sizeof(struct frag_hdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v6_mtu_reduced,
};
1636
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 (RFC 2385) ops for native IPv6 sockets. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_specific = {
	.md5_lookup	=	tcp_v6_md5_lookup,
	.calc_md5_hash	=	tcp_v6_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001644
/*
 *	TCP over IPv4 via INET6 API
 *
 *	Used when a v6 socket carries a v4-mapped connection: transmit and
 *	header handling are IPv4, but sockopt/sockaddr stay IPv6.
 */
static const struct inet_connection_sock_af_ops ipv6_mapped = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v6_conn_request,
	.syn_recv_sock	   = tcp_v6_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ipv6_setsockopt,
	.getsockopt	   = ipv6_getsockopt,
	.addr2sockaddr	   = inet6_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in6),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ipv6_setsockopt,
	.compat_getsockopt = compat_ipv6_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
1666
#ifdef CONFIG_TCP_MD5SIG
/* TCP-MD5 ops for v4-mapped sockets: v4 hashing, v6 key parsing. */
static const struct tcp_sock_af_ops tcp_sock_ipv6_mapped_specific = {
	.md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
	.md5_parse	=	tcp_v6_parse_md5_keys,
};
#endif
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001674
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675/* NOTE: A lot of things set to zero explicitly by call to
1676 * sk_alloc() so need not be done here.
1677 */
1678static int tcp_v6_init_sock(struct sock *sk)
1679{
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001680 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001681
Neal Cardwell900f65d2012-04-19 09:55:21 +00001682 tcp_init_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683
Arnaldo Carvalho de Melo8292a172005-12-13 23:15:52 -08001684 icsk->icsk_af_ops = &ipv6_specific;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001686#ifdef CONFIG_TCP_MD5SIG
David S. Millerac807fa2012-04-23 03:21:58 -04001687 tcp_sk(sk)->af_specific = &tcp_sock_ipv6_specific;
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001688#endif
1689
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 return 0;
1691}
1692
/* Tear down a v6 TCP socket: shared TCP cleanup first, then the
 * IPv6-specific state (options, flow labels). */
static void tcp_v6_destroy_sock(struct sock *sk)
{
	tcp_v4_destroy_sock(sk);
	inet6_destroy_sock(sk);
}
1698
YOSHIFUJI Hideaki952a10b2007-04-21 20:13:44 +09001699#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700/* Proc filesystem TCPv6 sock list dumping. */
/* Emit one /proc/net/tcp6 line for a pending (SYN_RECV) request sock.
 * Column layout must stay in lock-step with get_tcp6_sock()/
 * get_timewait6_sock() so parsers see a uniform table. */
static void get_openreq6(struct seq_file *seq,
			 const struct request_sock *req, int i)
{
	/* Remaining SYN-ACK retransmit time, clamped to zero. */
	long ttd = req->rsk_timer.expires - jiffies;
	const struct in6_addr *src = &inet_rsk(req)->ir_v6_loc_addr;
	const struct in6_addr *dest = &inet_rsk(req)->ir_v6_rmt_addr;

	if (ttd < 0)
		ttd = 0;

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3],
		   inet_rsk(req)->ir_num,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3],
		   ntohs(inet_rsk(req)->ir_rmt_port),
		   TCP_SYN_RECV,
		   0, 0, /* could print option size, but that is af dependent. */
		   1,   /* timers active (only the expire timer) */
		   jiffies_to_clock_t(ttd),
		   req->num_timeout,
		   from_kuid_munged(seq_user_ns(seq),
				    sock_i_uid(req->rsk_listener)),
		   0,  /* non standard timer */
		   0, /* open_requests have no inode */
		   0, req);
}
1732
1733static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
1734{
Eric Dumazetb71d1d42011-04-22 04:53:02 +00001735 const struct in6_addr *dest, *src;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 __u16 destp, srcp;
1737 int timer_active;
1738 unsigned long timer_expires;
Eric Dumazetcf533ea2011-10-21 05:22:42 -04001739 const struct inet_sock *inet = inet_sk(sp);
1740 const struct tcp_sock *tp = tcp_sk(sp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001741 const struct inet_connection_sock *icsk = inet_csk(sp);
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001742 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001743 int rx_queue;
1744 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
Eric Dumazetefe42082013-10-03 15:42:29 -07001746 dest = &sp->sk_v6_daddr;
1747 src = &sp->sk_v6_rcv_saddr;
Eric Dumazetc720c7e82009-10-15 06:30:45 +00001748 destp = ntohs(inet->inet_dport);
1749 srcp = ntohs(inet->inet_sport);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001750
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001751 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
Yuchung Cheng57dde7f2017-01-12 22:11:33 -08001752 icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
Yuchung Chengce3cf4e2016-06-06 15:07:18 -07001753 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754 timer_active = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001755 timer_expires = icsk->icsk_timeout;
1756 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757 timer_active = 4;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001758 timer_expires = icsk->icsk_timeout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 } else if (timer_pending(&sp->sk_timer)) {
1760 timer_active = 2;
1761 timer_expires = sp->sk_timer.expires;
1762 } else {
1763 timer_active = 0;
1764 timer_expires = jiffies;
1765 }
1766
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001767 state = sk_state_load(sp);
1768 if (state == TCP_LISTEN)
1769 rx_queue = sp->sk_ack_backlog;
1770 else
1771 /* Because we don't lock the socket,
1772 * we might find a transient negative value.
1773 */
1774 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
1775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 seq_printf(seq,
1777 "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
Francesco Fuscod14c5ab2013-08-15 13:42:14 +02001778 "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 i,
1780 src->s6_addr32[0], src->s6_addr32[1],
1781 src->s6_addr32[2], src->s6_addr32[3], srcp,
1782 dest->s6_addr32[0], dest->s6_addr32[1],
1783 dest->s6_addr32[2], dest->s6_addr32[3], destp,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001784 state,
1785 tp->write_seq - tp->snd_una,
1786 rx_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 timer_active,
Eric Dumazeta399a802012-08-08 21:13:53 +00001788 jiffies_delta_to_clock_t(timer_expires - jiffies),
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001789 icsk->icsk_retransmits,
Eric W. Biedermana7cb5a42012-05-24 01:10:10 -06001790 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001791 icsk->icsk_probes_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001792 sock_i_ino(sp),
1793 atomic_read(&sp->sk_refcnt), sp,
Stephen Hemminger7be87352008-06-27 20:00:19 -07001794 jiffies_to_clock_t(icsk->icsk_rto),
1795 jiffies_to_clock_t(icsk->icsk_ack.ato),
Weilong Chen4c99aa42013-12-19 18:44:34 +08001796 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
Ilpo Järvinen0b6a05c2009-09-15 01:30:10 -07001797 tp->snd_cwnd,
Eric Dumazet00fd38d2015-11-12 08:43:18 -08001798 state == TCP_LISTEN ?
Eric Dumazet0536fcc2015-09-29 07:42:52 -07001799 fastopenq->max_qlen :
Yuchung Cheng0a672f72014-05-11 20:22:12 -07001800 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 );
1802}
1803
/* Emit one /proc/net/tcp6 line for a TIME-WAIT socket.  Most columns
 * are fixed placeholders; only addresses, substate and the remaining
 * timer are meaningful. */
static void get_timewait6_sock(struct seq_file *seq,
			       struct inet_timewait_sock *tw, int i)
{
	long delta = tw->tw_timer.expires - jiffies;
	const struct in6_addr *dest, *src;
	__u16 destp, srcp;

	dest = &tw->tw_v6_daddr;
	src  = &tw->tw_v6_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(seq,
		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
		   "%02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK\n",
		   i,
		   src->s6_addr32[0], src->s6_addr32[1],
		   src->s6_addr32[2], src->s6_addr32[3], srcp,
		   dest->s6_addr32[0], dest->s6_addr32[1],
		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
		   tw->tw_substate, 0, 0,
		   3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		   atomic_read(&tw->tw_refcnt), tw);
}
1828
Linus Torvalds1da177e2005-04-16 15:20:36 -07001829static int tcp6_seq_show(struct seq_file *seq, void *v)
1830{
1831 struct tcp_iter_state *st;
Eric Dumazet05dbc7b2013-10-03 00:22:02 -07001832 struct sock *sk = v;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
1834 if (v == SEQ_START_TOKEN) {
1835 seq_puts(seq,
1836 " sl "
1837 "local_address "
1838 "remote_address "
1839 "st tx_queue rx_queue tr tm->when retrnsmt"
1840 " uid timeout inode\n");
1841 goto out;
1842 }
1843 st = seq->private;
1844
Eric Dumazet079096f2015-10-02 11:43:32 -07001845 if (sk->sk_state == TCP_TIME_WAIT)
1846 get_timewait6_sock(seq, v, st->num);
1847 else if (sk->sk_state == TCP_NEW_SYN_RECV)
Eric Dumazetaa3a0c82015-10-02 11:43:30 -07001848 get_openreq6(seq, v, st->num);
Eric Dumazet079096f2015-10-02 11:43:32 -07001849 else
1850 get_tcp6_sock(seq, v, st->num);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001851out:
1852 return 0;
1853}
1854
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001855static const struct file_operations tcp6_afinfo_seq_fops = {
1856 .owner = THIS_MODULE,
1857 .open = tcp_seq_open,
1858 .read = seq_read,
1859 .llseek = seq_lseek,
1860 .release = seq_release_net
1861};
1862
Linus Torvalds1da177e2005-04-16 15:20:36 -07001863static struct tcp_seq_afinfo tcp6_seq_afinfo = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 .name = "tcp6",
1865 .family = AF_INET6,
Arjan van de Ven73cb88e2011-10-30 06:46:30 +00001866 .seq_fops = &tcp6_afinfo_seq_fops,
Denis V. Lunev9427c4b2008-04-13 22:12:13 -07001867 .seq_ops = {
1868 .show = tcp6_seq_show,
1869 },
Linus Torvalds1da177e2005-04-16 15:20:36 -07001870};
1871
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001872int __net_init tcp6_proc_init(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001874 return tcp_proc_register(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875}
1876
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001877void tcp6_proc_exit(struct net *net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878{
Daniel Lezcano6f8b13b2008-03-21 04:14:45 -07001879 tcp_proc_unregister(net, &tcp6_seq_afinfo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880}
1881#endif
1882
/*
 * struct proto for IPv6 TCP sockets.  Most operations are the generic
 * TCP ones shared with IPv4; only the entries with a tcp_v6_/inet6_
 * prefix are address-family specific.  Accounting pointers (sockets,
 * memory, orphans) and the sysctl limits are shared with IPv4 TCP.
 */
struct proto tcpv6_prot = {
	.name			= "TCPv6",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v6_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v6_init_sock,
	.destroy		= tcp_v6_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.keepalive		= tcp_set_keepalive,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v6_do_rcv,
	.release_cb		= tcp_release_cb,
	.hash			= inet6_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.stream_memory_free	= tcp_stream_memory_free,
	/* Shared IPv4+IPv6 accounting and limits. */
	.sockets_allocated	= &tcp_sockets_allocated,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.orphan_count		= &tcp_orphan_count,
	.sysctl_mem		= sysctl_tcp_mem,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	/* Sockets are allocated as tcp6_sock from an RCU-freed slab. */
	.obj_size		= sizeof(struct tcp6_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp6_timewait_sock_ops,
	.rsk_prot		= &tcp6_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
	.diag_destroy		= tcp_abort,
};
1927
Alexey Dobriyan41135cc2009-09-14 12:22:28 +00001928static const struct inet6_protocol tcpv6_protocol = {
Eric Dumazetc7109982012-07-26 12:18:11 +00001929 .early_demux = tcp_v6_early_demux,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001930 .handler = tcp_v6_rcv,
1931 .err_handler = tcp_v6_err,
1932 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
1933};
1934
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935static struct inet_protosw tcpv6_protosw = {
1936 .type = SOCK_STREAM,
1937 .protocol = IPPROTO_TCP,
1938 .prot = &tcpv6_prot,
1939 .ops = &inet6_stream_ops,
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08001940 .flags = INET_PROTOSW_PERMANENT |
1941 INET_PROTOSW_ICSK,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942};
1943
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001944static int __net_init tcpv6_net_init(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001945{
Denis V. Lunev56772422008-04-03 14:28:30 -07001946 return inet_ctl_sock_create(&net->ipv6.tcp_sk, PF_INET6,
1947 SOCK_RAW, IPPROTO_TCP, net);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001948}
1949
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001950static void __net_exit tcpv6_net_exit(struct net *net)
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001951{
Denis V. Lunev56772422008-04-03 14:28:30 -07001952 inet_ctl_sock_destroy(net->ipv6.tcp_sk);
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001953}
1954
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00001955static void __net_exit tcpv6_net_exit_batch(struct list_head *net_exit_list)
Eric W. Biedermanb099ce22009-12-03 02:29:09 +00001956{
Haishuang Yan1946e672016-12-28 17:52:32 +08001957 inet_twsk_purge(&tcp_hashinfo, AF_INET6);
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001958}
1959
/* pernet hooks: per-namespace ctl-socket setup/teardown plus a batched
 * exit pass for timewait cleanup. */
static struct pernet_operations tcpv6_net_ops = {
	.init	    = tcpv6_net_init,
	.exit	    = tcpv6_net_exit,
	.exit_batch = tcpv6_net_exit_batch,
};
1965
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001966int __init tcpv6_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967{
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001968 int ret;
David Woodhouseae0f7d52006-01-11 15:53:04 -08001969
Vlad Yasevich33362882012-11-15 08:49:15 +00001970 ret = inet6_add_protocol(&tcpv6_protocol, IPPROTO_TCP);
1971 if (ret)
Vlad Yasevichc6b641a2012-11-15 08:49:22 +00001972 goto out;
Vlad Yasevich33362882012-11-15 08:49:15 +00001973
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001974 /* register inet6 protocol */
1975 ret = inet6_register_protosw(&tcpv6_protosw);
1976 if (ret)
1977 goto out_tcpv6_protocol;
1978
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001979 ret = register_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001980 if (ret)
1981 goto out_tcpv6_protosw;
1982out:
1983 return ret;
1984
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001985out_tcpv6_protosw:
1986 inet6_unregister_protosw(&tcpv6_protosw);
Vlad Yasevich33362882012-11-15 08:49:15 +00001987out_tcpv6_protocol:
1988 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001989 goto out;
1990}
1991
Daniel Lezcano09f77092007-12-13 05:34:58 -08001992void tcpv6_exit(void)
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001993{
Daniel Lezcano93ec9262008-03-07 11:16:02 -08001994 unregister_pernet_subsys(&tcpv6_net_ops);
Daniel Lezcano7f4e4862007-12-11 02:25:35 -08001995 inet6_unregister_protosw(&tcpv6_protosw);
1996 inet6_del_protocol(&tcpv6_protocol, IPPROTO_TCP);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997}