// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *	RAW sockets for IPv6
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Adapted from linux/net/ipv4/raw.c
 *
 *	Fixes:
 *	Hideaki YOSHIFUJI	:	sin6_scope_id support
 *	YOSHIFUJI,H.@USAGI	:	raw checksum (RFC2292(bis) compliance)
 *	Kazunori MIYAZAWA @USAGI:	change process style to use ip6_append_data
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>

#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_IPV6_MIP6)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>

#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>

#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>

#define ICMPV6_HDRLEN	4	/* ICMPv6 header, RFC 4443 Section 2.1 */

struct raw_hashinfo raw_v6_hashinfo = {
	.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
EXPORT_SYMBOL_GPL(raw_v6_hashinfo);

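/*
 * Walk a raw socket hash chain, starting at @sk, and return the first
 * socket bound to protocol @num whose network namespace, bound device
 * and local/remote addresses match the incoming packet.
 */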
struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
		unsigned short num, const struct in6_addr *loc_addr,
		const struct in6_addr *rmt_addr, int dif, int sdif)
{
	bool is_multicast = ipv6_addr_is_multicast(loc_addr);

	sk_for_each_from(sk)
		if (inet_sk(sk)->inet_num == num) {

			if (!net_eq(sock_net(sk), net))
				continue;

			if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
			    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
				continue;

			if (!raw_sk_bound_dev_eq(net, sk->sk_bound_dev_if,
						 dif, sdif))
				continue;

			if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
				if (ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
					goto found;
				if (is_multicast &&
				    inet6_mc_check(sk, loc_addr, rmt_addr))
					goto found;
				continue;
			}
			goto found;
		}
	sk = NULL;
found:
	return sk;
}
EXPORT_SYMBOL_GPL(__raw_v6_lookup);

/*
 *	0 - deliver
 *	1 - block
 */
static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
{
	struct icmp6hdr _hdr;
	const struct icmp6hdr *hdr;

	/* We require only the four bytes of the ICMPv6 header, not any
	 * additional bytes of message body in "struct icmp6hdr".
	 */
	hdr = skb_header_pointer(skb, skb_transport_offset(skb),
				 ICMPV6_HDRLEN, &_hdr);
	if (hdr) {
		const __u32 *data = &raw6_sk(sk)->filter.data[0];
		unsigned int type = hdr->icmp6_type;

		return (data[type >> 5] & (1U << (type & 31))) != 0;
	}
	return 1;
}

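/*
 * Optional Mobility Header (MH) filter hook: the MIPv6 code can register a
 * callback here to validate MH packets before they reach raw sockets.  The
 * pointer is published and torn down under RCU.
 */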
#if IS_ENABLED(CONFIG_IPV6_MIP6)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);

static mh_filter_t __rcu *mh_filter __read_mostly;

int rawv6_mh_filter_register(mh_filter_t filter)
{
	rcu_assign_pointer(mh_filter, filter);
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);

int rawv6_mh_filter_unregister(mh_filter_t filter)
{
	RCU_INIT_POINTER(mh_filter, NULL);
	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);

#endif

/*
 *	demultiplex raw sockets.
 *	(should consider queueing the skb in the sock receive_queue
 *	without calling rawv6.c)
 *
 *	Caller owns SKB so we must make clones.
 */
static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
	const struct in6_addr *saddr;
	const struct in6_addr *daddr;
	struct sock *sk;
	bool delivered = false;
	__u8 hash;
	struct net *net;

	saddr = &ipv6_hdr(skb)->saddr;
	daddr = saddr + 1;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);

	if (!sk)
		goto out;

	net = dev_net(skb->dev);
	sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr,
			     inet6_iif(skb), inet6_sdif(skb));

	while (sk) {
		int filtered;

		delivered = true;
		switch (nexthdr) {
		case IPPROTO_ICMPV6:
			filtered = icmpv6_filter(sk, skb);
			break;

#if IS_ENABLED(CONFIG_IPV6_MIP6)
		case IPPROTO_MH:
		{
			/* XXX: To validate MH only once per packet, the
			 * check is done here, although it should come after
			 * the xfrm policy check.  The xfrm policy check
			 * itself stays in rawv6_rcv() because it must be
			 * performed for each socket.
			 */
			mh_filter_t *filter;

			filter = rcu_dereference(mh_filter);
			filtered = filter ? (*filter)(sk, skb) : 0;
			break;
		}
#endif
		default:
			filtered = 0;
			break;
		}

		if (filtered < 0)
			break;
		if (filtered == 0) {
			struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

			/* Not releasing hash table! */
			if (clone) {
				nf_reset_ct(clone);
				rawv6_rcv(sk, clone);
			}
		}
		sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
				     inet6_iif(skb), inet6_sdif(skb));
	}
out:
	read_unlock(&raw_v6_hashinfo.lock);
	return delivered;
}

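/*
 * Returns true if at least one raw socket bound to @nexthdr matched the
 * packet and ipv6_raw_deliver() handed it a clone.
 */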
bool raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
	struct sock *raw_sk;

	raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (RAW_HTABLE_SIZE - 1)]);
	if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
		raw_sk = NULL;

	return raw_sk != NULL;
}

/* This cleans up af_inet6 a bit. -DaveM */
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
	__be32 v4addr = 0;
	int addr_type;
	int err;

	if (addr_len < SIN6_LEN_RFC2133)
		return -EINVAL;

	if (addr->sin6_family != AF_INET6)
		return -EINVAL;

	addr_type = ipv6_addr_type(&addr->sin6_addr);

	/* Raw sockets are IPv6 only */
	if (addr_type == IPV6_ADDR_MAPPED)
		return -EADDRNOTAVAIL;

	lock_sock(sk);

	err = -EINVAL;
	if (sk->sk_state != TCP_CLOSE)
		goto out;

	rcu_read_lock();
	/* Check if the address belongs to the host. */
	if (addr_type != IPV6_ADDR_ANY) {
		struct net_device *dev = NULL;

		if (__ipv6_addr_needs_scope_id(addr_type)) {
			if (addr_len >= sizeof(struct sockaddr_in6) &&
			    addr->sin6_scope_id) {
				/* Override any existing binding, if another
				 * one is supplied by user.
				 */
				sk->sk_bound_dev_if = addr->sin6_scope_id;
			}

			/* Binding to link-local address requires an interface */
			if (!sk->sk_bound_dev_if)
				goto out_unlock;
		}

		if (sk->sk_bound_dev_if) {
			err = -ENODEV;
			dev = dev_get_by_index_rcu(sock_net(sk),
						   sk->sk_bound_dev_if);
			if (!dev)
				goto out_unlock;
		}

		/* The IPv4 address of the socket is invalid.  Only the
		 * unspecified and mapped addresses have a v4 equivalent.
		 */
		v4addr = LOOPBACK4_IPV6;
		if (!(addr_type & IPV6_ADDR_MULTICAST) &&
		    !ipv6_can_nonlocal_bind(sock_net(sk), inet)) {
			err = -EADDRNOTAVAIL;
			if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
					   dev, 0)) {
				goto out_unlock;
			}
		}
	}

	inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
	sk->sk_v6_rcv_saddr = addr->sin6_addr;
	if (!(addr_type & IPV6_ADDR_MULTICAST))
		np->saddr = addr->sin6_addr;
	err = 0;
out_unlock:
	rcu_read_unlock();
out:
	release_sock(sk);
	return err;
}

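/*
 * Handle an ICMPv6 error destined for this raw socket: update PMTU or
 * redirect state as needed and report the error to userspace when the
 * socket is connected or has IPV6_RECVERR enabled.
 */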
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
	       struct inet6_skb_parm *opt,
	       u8 type, u8 code, int offset, __be32 info)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	int err;
	int harderr;

	/* Report error on raw socket, if:
	   1. User requested recverr.
	   2. Socket is connected (otherwise the error indication
	      is useless without recverr and the error is hard).
	 */
	if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
		return;

	harderr = icmpv6_err_convert(type, code, &err);
	if (type == ICMPV6_PKT_TOOBIG) {
		ip6_sk_update_pmtu(skb, sk, info);
		harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
	}
	if (type == NDISC_REDIRECT) {
		ip6_sk_redirect(skb, sk);
		return;
	}
	if (np->recverr) {
		u8 *payload = skb->data;
		if (!inet->hdrincl)
			payload += offset;
		ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
	}

	if (np->recverr || harderr) {
		sk->sk_err = err;
		sk_error_report(sk);
	}
}

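/*
 * Called from the ICMPv6 receive path: fan an ICMP error out to every raw
 * socket bound to protocol @nexthdr that matches the offending packet.
 */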
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
		u8 type, u8 code, int inner_offset, __be32 info)
{
	struct sock *sk;
	int hash;
	const struct in6_addr *saddr, *daddr;
	struct net *net;

	hash = nexthdr & (RAW_HTABLE_SIZE - 1);

	read_lock(&raw_v6_hashinfo.lock);
	sk = sk_head(&raw_v6_hashinfo.ht[hash]);
	if (sk) {
		/* Note: ipv6_hdr(skb) != skb->data */
		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
		saddr = &ip6h->saddr;
		daddr = &ip6h->daddr;
		net = dev_net(skb->dev);

		while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
					     inet6_iif(skb), inet6_iif(skb)))) {
			rawv6_err(sk, skb, NULL, type, code,
					inner_offset, info);
			sk = sk_next(sk);
		}
	}
	read_unlock(&raw_v6_hashinfo.lock);
}

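/*
 * Final queueing step: drop the packet if a deferred checksum is still
 * pending and fails, otherwise charge the skb to the socket's receive queue.
 */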
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
	    skb_checksum_complete(skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	/* Charge it to the socket. */
	skb_dst_drop(skb);
	if (sock_queue_rcv_skb(sk, skb) < 0) {
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	return 0;
}

/*
 *	This is next to useless...
 *	if we demultiplex in the network layer we don't need the extra call
 *	just to queue the skb...
 *	maybe we could have the network decide upon a hint if it
 *	should call raw_rcv for demultiplexing
 */
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
	struct inet_sock *inet = inet_sk(sk);
	struct raw6_sock *rp = raw6_sk(sk);

	if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
		atomic_inc(&sk->sk_drops);
		kfree_skb(skb);
		return NET_RX_DROP;
	}

	if (!rp->checksum)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (skb->ip_summed == CHECKSUM_COMPLETE) {
		skb_postpull_rcsum(skb, skb_network_header(skb),
				   skb_network_header_len(skb));
		if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
				     &ipv6_hdr(skb)->daddr,
				     skb->len, inet->inet_num, skb->csum))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}
	if (!skb_csum_unnecessary(skb))
		skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							 &ipv6_hdr(skb)->daddr,
							 skb->len,
							 inet->inet_num, 0));

	if (inet->hdrincl) {
		if (skb_checksum_complete(skb)) {
			atomic_inc(&sk->sk_drops);
			kfree_skb(skb);
			return NET_RX_DROP;
		}
	}

	rawv6_rcv_skb(sk, skb);
	return 0;
}


/*
 *	This should be easy, if there is something there
 *	we return it, otherwise we block.
 */

static int rawv6_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int noblock, int flags, int *addr_len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name);
	struct sk_buff *skb;
	size_t copied;
	int err;

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (flags & MSG_ERRQUEUE)
		return ipv6_recv_error(sk, msg, len, addr_len);

	if (np->rxpmtu && np->rxopt.bits.rxpmtu)
		return ipv6_recv_rxpmtu(sk, msg, len, addr_len);

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		goto out;

	copied = skb->len;
	if (copied > len) {
		copied = len;
		msg->msg_flags |= MSG_TRUNC;
	}

	if (skb_csum_unnecessary(skb)) {
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
	} else if (msg->msg_flags&MSG_TRUNC) {
		if (__skb_checksum_complete(skb))
			goto csum_copy_err;
		err = skb_copy_datagram_msg(skb, 0, msg, copied);
	} else {
		err = skb_copy_and_csum_datagram_msg(skb, 0, msg);
		if (err == -EINVAL)
			goto csum_copy_err;
	}
	if (err)
		goto out_free;

	/* Copy the address. */
	if (sin6) {
		sin6->sin6_family = AF_INET6;
		sin6->sin6_port = 0;
		sin6->sin6_addr = ipv6_hdr(skb)->saddr;
		sin6->sin6_flowinfo = 0;
		sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
							  inet6_iif(skb));
		*addr_len = sizeof(*sin6);
	}

	sock_recv_ts_and_drops(msg, sk, skb);

	if (np->rxopt.all)
		ip6_datagram_recv_ctl(sk, msg, skb);

	err = copied;
	if (flags & MSG_TRUNC)
		err = skb->len;

out_free:
	skb_free_datagram(sk, skb);
out:
	return err;

csum_copy_err:
	skb_kill_datagram(sk, skb, flags);

	/* Error for blocking case is chosen to masquerade
	   as some normal condition.
	 */
	err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
	goto out;
}

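/*
 * For sockets using IPV6_CHECKSUM: fold the checksum over all queued
 * fragments and store the 16-bit result at the user-configured offset
 * before pushing the pending frames out.
 */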
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				     struct raw6_sock *rp)
{
	struct sk_buff *skb;
	int err = 0;
	int offset;
	int len;
	int total_len;
	__wsum tmp_csum;
	__sum16 csum;

	if (!rp->checksum)
		goto send;

	skb = skb_peek(&sk->sk_write_queue);
	if (!skb)
		goto out;

	offset = rp->offset;
	total_len = inet_sk(sk)->cork.base.length;
	if (offset >= total_len - 1) {
		err = -EINVAL;
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* should check HW csum here -- miyazawa */
	if (skb_queue_len(&sk->sk_write_queue) == 1) {
		/*
		 * Only one fragment on the socket.
		 */
		tmp_csum = skb->csum;
	} else {
		struct sk_buff *csum_skb = NULL;
		tmp_csum = 0;

		skb_queue_walk(&sk->sk_write_queue, skb) {
			tmp_csum = csum_add(tmp_csum, skb->csum);

			if (csum_skb)
				continue;

			len = skb->len - skb_transport_offset(skb);
			if (offset >= len) {
				offset -= len;
				continue;
			}

			csum_skb = skb;
		}

		skb = csum_skb;
	}

	offset += skb_transport_offset(skb);
	err = skb_copy_bits(skb, offset, &csum, 2);
	if (err < 0) {
		ip6_flush_pending_frames(sk);
		goto out;
	}

	/* in case cksum was not initialized */
	if (unlikely(csum))
		tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));

	csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
			       total_len, fl6->flowi6_proto, tmp_csum);

	if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
		csum = CSUM_MANGLED_0;

	BUG_ON(skb_store_bits(skb, offset, &csum, 2));

send:
	err = ip6_push_pending_frames(sk);
out:
	return err;
}

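/*
 * IPV6_HDRINCL transmit path: the caller supplies the complete IPv6 header,
 * so copy the message verbatim into a freshly allocated skb and hand it to
 * dst_output() through the NF_INET_LOCAL_OUT hook.
 */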
static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
			     struct flowi6 *fl6, struct dst_entry **dstp,
			     unsigned int flags, const struct sockcm_cookie *sockc)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *iph;
	struct sk_buff *skb;
	int err;
	struct rt6_info *rt = (struct rt6_info *)*dstp;
	int hlen = LL_RESERVED_SPACE(rt->dst.dev);
	int tlen = rt->dst.dev->needed_tailroom;

	if (length > rt->dst.dev->mtu) {
		ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (length < sizeof(struct ipv6hdr))
		return -EINVAL;
	if (flags&MSG_PROBE)
		goto out;

	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto error;
	skb_reserve(skb, hlen);

	skb->protocol = htons(ETH_P_IPV6);
	skb->priority = sk->sk_priority;
	skb->mark = sockc->mark;
	skb->tstamp = sockc->transmit_time;

	skb_put(skb, length);
	skb_reset_network_header(skb);
	iph = ipv6_hdr(skb);

	skb->ip_summed = CHECKSUM_NONE;

	skb_setup_tx_timestamp(skb, sockc->tsflags);

	if (flags & MSG_CONFIRM)
		skb_set_dst_pending_confirm(skb, 1);

	skb->transport_header = skb->network_header;
	err = memcpy_from_msg(iph, msg, length);
	if (err) {
		err = -EFAULT;
		kfree_skb(skb);
		goto error;
	}

	skb_dst_set(skb, &rt->dst);
	*dstp = NULL;

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip6_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	/* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
	 * in the error path. Since skb has been freed, the dst could
	 * have been queued for deletion.
	 */
	rcu_read_lock();
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
		      NULL, rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err) {
		IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
		rcu_read_unlock();
		goto error_check;
	}
	rcu_read_unlock();
out:
	return 0;

error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
error_check:
	if (err == -ENOBUFS && !np->recverr)
		err = 0;
	return err;
}

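/*
 * Scratch state passed to raw6_getfrag(): the original msghdr plus up to
 * four header bytes already consumed by rawv6_probe_proto_opt().
 */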
struct raw6_frag_vec {
	struct msghdr *msg;
	int hlen;
	char c[4];
};

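/*
 * Peek at the first bytes of the payload so the flow key carries the
 * ICMPv6 type/code or the Mobility Header type for routing decisions.
 */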
static int rawv6_probe_proto_opt(struct raw6_frag_vec *rfv, struct flowi6 *fl6)
{
	int err = 0;

	switch (fl6->flowi6_proto) {
	case IPPROTO_ICMPV6:
		rfv->hlen = 2;
		err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
		if (!err) {
			fl6->fl6_icmp_type = rfv->c[0];
			fl6->fl6_icmp_code = rfv->c[1];
		}
		break;
	case IPPROTO_MH:
		rfv->hlen = 4;
		err = memcpy_from_msg(rfv->c, rfv->msg, rfv->hlen);
		if (!err)
			fl6->fl6_mh_type = rfv->c[2];
	}
	return err;
}

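/*
 * getfrag callback for ip6_append_data(): replay the header bytes saved in
 * rfv->c first, then copy the remaining user data via ip_generic_getfrag().
 */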
static int raw6_getfrag(void *from, char *to, int offset, int len, int odd,
			struct sk_buff *skb)
{
	struct raw6_frag_vec *rfv = from;

	if (offset < rfv->hlen) {
		int copy = min(rfv->hlen - offset, len);

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			memcpy(to, rfv->c + offset, copy);
		else
			skb->csum = csum_block_add(
				skb->csum,
				csum_partial_copy_nocheck(rfv->c + offset,
							  to, copy),
				odd);

		odd = 0;
		offset += copy;
		to += copy;
		len -= copy;

		if (!len)
			return 0;
	}

	offset -= rfv->hlen;

	return ip_generic_getfrag(rfv->msg, to, offset, len, odd, skb);
}

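/*
 * sendmsg() for raw IPv6 sockets: resolve the destination, flow label and
 * ancillary data, then transmit either via rawv6_send_hdrinc() (IPV6_HDRINCL)
 * or via ip6_append_data() using raw6_getfrag().
 */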
Ying Xue | 1b78414 | 2015-03-02 15:37:48 +0800 | [diff] [blame] | 766 | static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 767 | { |
Eric Dumazet | 45f6fad | 2015-11-29 19:37:57 -0800 | [diff] [blame] | 768 | struct ipv6_txoptions *opt_to_free = NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 769 | struct ipv6_txoptions opt_space; |
Steffen Hurrle | 342dfc3 | 2014-01-17 22:53:15 +0100 | [diff] [blame] | 770 | DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); |
Arnaud Ebalard | 20c59de | 2010-06-01 21:35:01 +0000 | [diff] [blame] | 771 | struct in6_addr *daddr, *final_p, final; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 772 | struct inet_sock *inet = inet_sk(sk); |
| 773 | struct ipv6_pinfo *np = inet6_sk(sk); |
| 774 | struct raw6_sock *rp = raw6_sk(sk); |
| 775 | struct ipv6_txoptions *opt = NULL; |
| 776 | struct ip6_flowlabel *flowlabel = NULL; |
| 777 | struct dst_entry *dst = NULL; |
Al Viro | 19e3c66 | 2014-11-24 12:10:46 -0500 | [diff] [blame] | 778 | struct raw6_frag_vec rfv; |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 779 | struct flowi6 fl6; |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 780 | struct ipcm6_cookie ipc6; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 781 | int addr_len = msg->msg_namelen; |
Olivier Matz | 59e3e4b | 2019-06-06 09:15:18 +0200 | [diff] [blame] | 782 | int hdrincl; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 783 | u16 proto; |
| 784 | int err; |
| 785 | |
| 786 | /* Rough check on arithmetic overflow, |
YOSHIFUJI Hideaki | b59e139 | 2007-03-30 14:45:35 -0700 | [diff] [blame] | 787 | better check is made in ip6_append_data(). |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 788 | */ |
YOSHIFUJI Hideaki | b59e139 | 2007-03-30 14:45:35 -0700 | [diff] [blame] | 789 | if (len > INT_MAX) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 790 | return -EMSGSIZE; |
| 791 | |
| 792 | /* Mirror BSD error message compatibility */ |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 793 | if (msg->msg_flags & MSG_OOB) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 794 | return -EOPNOTSUPP; |
| 795 | |
Olivier Matz | 59e3e4b | 2019-06-06 09:15:18 +0200 | [diff] [blame] | 796 | /* hdrincl should be READ_ONCE(inet->hdrincl) |
| 797 | * but READ_ONCE() doesn't work with bit fields. |
| 798 | * Doing this indirectly yields the same result. |
| 799 | */ |
| 800 | hdrincl = inet->hdrincl; |
| 801 | hdrincl = READ_ONCE(hdrincl); |
| 802 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 803 | /* |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 804 | * Get and verify the address. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 805 | */ |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 806 | memset(&fl6, 0, sizeof(fl6)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 807 | |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 808 | fl6.flowi6_mark = sk->sk_mark; |
Lorenzo Colitti | e2d118a | 2016-11-04 02:23:43 +0900 | [diff] [blame] | 809 | fl6.flowi6_uid = sk->sk_uid; |
Laszlo Attila Toth | 4a19ec5 | 2008-01-30 19:08:16 -0800 | [diff] [blame] | 810 | |
Willem de Bruijn | b515430a | 2018-07-06 10:12:55 -0400 | [diff] [blame] | 811 | ipcm6_init(&ipc6); |
Willem de Bruijn | 5fdaa88 | 2018-07-06 10:12:57 -0400 | [diff] [blame] | 812 | ipc6.sockc.tsflags = sk->sk_tsflags; |
Willem de Bruijn | c6af0c2 | 2019-09-11 15:50:51 -0400 | [diff] [blame] | 813 | ipc6.sockc.mark = sk->sk_mark; |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 814 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 815 | if (sin6) { |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 816 | if (addr_len < SIN6_LEN_RFC2133) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 817 | return -EINVAL; |
| 818 | |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 819 | if (sin6->sin6_family && sin6->sin6_family != AF_INET6) |
Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 820 | return -EAFNOSUPPORT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 821 | |
| 822 | /* port is the proto value [0..255] carried in nexthdr */ |
| 823 | proto = ntohs(sin6->sin6_port); |
| 824 | |
| 825 | if (!proto) |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 826 | proto = inet->inet_num; |
| 827 | else if (proto != inet->inet_num) |
Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 828 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 829 | |
| 830 | if (proto > 255) |
Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 831 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 832 | |
| 833 | daddr = &sin6->sin6_addr; |
| 834 | if (np->sndflow) { |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 835 | fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK; |
| 836 | if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) { |
| 837 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
Willem de Bruijn | 59c820b | 2019-07-07 05:34:45 -0400 | [diff] [blame] | 838 | if (IS_ERR(flowlabel)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 839 | return -EINVAL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 840 | } |
| 841 | } |
| 842 | |
| 843 | /* |
| 844 | * Otherwise it will be difficult to maintain |
| 845 | * sk->sk_dst_cache. |
| 846 | */ |
| 847 | if (sk->sk_state == TCP_ESTABLISHED && |
Eric Dumazet | efe4208 | 2013-10-03 15:42:29 -0700 | [diff] [blame] | 848 | ipv6_addr_equal(daddr, &sk->sk_v6_daddr)) |
| 849 | daddr = &sk->sk_v6_daddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 850 | |
| 851 | if (addr_len >= sizeof(struct sockaddr_in6) && |
| 852 | sin6->sin6_scope_id && |
Hannes Frederic Sowa | 842df07 | 2013-03-08 02:07:19 +0000 | [diff] [blame] | 853 | __ipv6_addr_needs_scope_id(__ipv6_addr_type(daddr))) |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 854 | fl6.flowi6_oif = sin6->sin6_scope_id; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 855 | } else { |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 856 | if (sk->sk_state != TCP_ESTABLISHED) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 857 | return -EDESTADDRREQ; |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 858 | |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 859 | proto = inet->inet_num; |
Eric Dumazet | efe4208 | 2013-10-03 15:42:29 -0700 | [diff] [blame] | 860 | daddr = &sk->sk_v6_daddr; |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 861 | fl6.flowlabel = np->flow_label; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 862 | } |
| 863 | |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 864 | if (fl6.flowi6_oif == 0) |
| 865 | fl6.flowi6_oif = sk->sk_bound_dev_if; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 866 | |
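| | /* |
| |  * Ancillary data: ip6_datagram_send_ctl() parses the cmsgs into ipc6 |
| |  * and opt_space (pktinfo, hop limit, traffic class, tx options, ...) |
| |  * and may hand back a flow label, which is then looked up as well. |
| |  */ |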
| 867 | if (msg->msg_controllen) { |
| 868 | opt = &opt_space; |
| 869 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
| 870 | opt->tot_len = sizeof(struct ipv6_txoptions); |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 871 | ipc6.opt = opt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 872 | |
Willem de Bruijn | 5fdaa88 | 2018-07-06 10:12:57 -0400 | [diff] [blame] | 873 | err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, &ipc6); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 874 | if (err < 0) { |
| 875 | fl6_sock_release(flowlabel); |
| 876 | return err; |
| 877 | } |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 878 | if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) { |
| 879 | flowlabel = fl6_sock_lookup(sk, fl6.flowlabel); |
Willem de Bruijn | 59c820b | 2019-07-07 05:34:45 -0400 | [diff] [blame] | 880 | if (IS_ERR(flowlabel)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 881 | return -EINVAL; |
| 882 | } |
| 883 | if (!(opt->opt_nflen|opt->opt_flen)) |
| 884 | opt = NULL; |
| 885 | } |
Eric Dumazet | 45f6fad | 2015-11-29 19:37:57 -0800 | [diff] [blame] | 886 | if (!opt) { |
| 887 | opt = txopt_get(np); |
| 888 | opt_to_free = opt; |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 889 | } |
YOSHIFUJI Hideaki | df9890c | 2005-11-20 12:23:18 +0900 | [diff] [blame] | 890 | if (flowlabel) |
| 891 | opt = fl6_merge_options(&opt_space, flowlabel, opt); |
| 892 | opt = ipv6_fixup_options(&opt_space, opt); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 893 | |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 894 | fl6.flowi6_proto = proto; |
Willem de Bruijn | c6af0c2 | 2019-09-11 15:50:51 -0400 | [diff] [blame] | 895 | fl6.flowi6_mark = ipc6.sockc.mark; |
Olivier Matz | b9aa52c | 2019-06-06 09:15:19 +0200 | [diff] [blame] | 896 | |
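| | /* |
| |  * Unless the application supplies the IPv6 header itself, peek at the |
| |  * start of the payload so per-protocol flow fields (e.g. the ICMPv6 |
| |  * type and code) are filled in before the route lookup. |
| |  */ |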
| 897 | if (!hdrincl) { |
| 898 | rfv.msg = msg; |
| 899 | rfv.hlen = 0; |
| 900 | err = rawv6_probe_proto_opt(&rfv, &fl6); |
| 901 | if (err) |
| 902 | goto out; |
| 903 | } |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 904 | |
Brian Haley | 876c7f4 | 2008-04-11 00:38:24 -0400 | [diff] [blame] | 905 | if (!ipv6_addr_any(daddr)) |
Alexey Dobriyan | 4e3fd7a | 2011-11-21 03:39:03 +0000 | [diff] [blame] | 906 | fl6.daddr = *daddr; |
Brian Haley | 876c7f4 | 2008-04-11 00:38:24 -0400 | [diff] [blame] | 907 | else |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 908 | fl6.daddr.s6_addr[15] = 0x1; /* :: means loopback (BSD'ism) */ |
| 909 | if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr)) |
Alexey Dobriyan | 4e3fd7a | 2011-11-21 03:39:03 +0000 | [diff] [blame] | 910 | fl6.saddr = np->saddr; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 911 | |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 912 | final_p = fl6_update_dst(&fl6, opt, &final); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 913 | |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 914 | if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr)) |
| 915 | fl6.flowi6_oif = np->mcast_oif; |
Erich E. Hoover | c4062df | 2012-02-08 09:11:08 +0000 | [diff] [blame] | 916 | else if (!fl6.flowi6_oif) |
| 917 | fl6.flowi6_oif = np->ucast_oif; |
Paul Moore | 3df98d7 | 2020-09-27 22:38:26 -0400 | [diff] [blame] | 918 | security_sk_classify_flow(sk, flowi6_to_flowi_common(&fl6)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 919 | |
Olivier Matz | 59e3e4b | 2019-06-06 09:15:18 +0200 | [diff] [blame] | 920 | if (hdrincl) |
Martin KaFai Lau | 48e8aa6 | 2015-05-22 20:56:02 -0700 | [diff] [blame] | 921 | fl6.flowi6_flags |= FLOWI_FLAG_KNOWN_NH; |
| 922 | |
Hannes Frederic Sowa | 38b7097 | 2016-06-11 20:08:19 +0200 | [diff] [blame] | 923 | if (ipc6.tclass < 0) |
| 924 | ipc6.tclass = np->tclass; |
| 925 | |
| 926 | fl6.flowlabel = ip6_make_flowinfo(ipc6.tclass, fl6.flowlabel); |
| 927 | |
Sabrina Dubroca | c4e85f7 | 2019-12-04 15:35:52 +0100 | [diff] [blame] | 928 | dst = ip6_dst_lookup_flow(sock_net(sk), sk, &fl6, final_p); |
David S. Miller | 68d0c6d | 2011-03-01 13:19:07 -0800 | [diff] [blame] | 929 | if (IS_ERR(dst)) { |
| 930 | err = PTR_ERR(dst); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 | goto out; |
David S. Miller | 14e50e5 | 2007-05-24 18:17:54 -0700 | [diff] [blame] | 932 | } |
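| | /* Fall back to the per-socket defaults for anything the cmsgs left unset. */ |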
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 933 | if (ipc6.hlimit < 0) |
| 934 | ipc6.hlimit = ip6_sk_dst_hoplimit(np, &fl6, dst); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 935 | |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 936 | if (ipc6.dontfrag < 0) |
| 937 | ipc6.dontfrag = np->dontfrag; |
Brian Haley | 13b52cd | 2010-04-23 11:26:08 +0000 | [diff] [blame] | 938 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 939 | if (msg->msg_flags&MSG_CONFIRM) |
| 940 | goto do_confirm; |
| 941 | |
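| | /* |
| |  * Two transmit paths: with IPV6_HDRINCL the caller-built packet goes |
| |  * straight out through rawv6_send_hdrinc(); otherwise the payload is |
| |  * queued with ip6_append_data() and, unless MSG_MORE is set, pushed |
| |  * out by rawv6_push_pending_frames(), which also fills in the |
| |  * checksum requested via IPV6_CHECKSUM. |
| |  */ |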
| 942 | back_from_confirm: |
Olivier Matz | 59e3e4b | 2019-06-06 09:15:18 +0200 | [diff] [blame] | 943 | if (hdrincl) |
Jesus Sanchez-Palencia | a818f75 | 2018-07-03 15:42:50 -0700 | [diff] [blame] | 944 | err = rawv6_send_hdrinc(sk, msg, len, &fl6, &dst, |
Willem de Bruijn | 5fdaa88 | 2018-07-06 10:12:57 -0400 | [diff] [blame] | 945 | msg->msg_flags, &ipc6.sockc); |
Eric Dumazet | 1789a64 | 2010-06-03 22:23:57 +0000 | [diff] [blame] | 946 | else { |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 947 | ipc6.opt = opt; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | lock_sock(sk); |
Al Viro | 19e3c66 | 2014-11-24 12:10:46 -0500 | [diff] [blame] | 949 | err = ip6_append_data(sk, raw6_getfrag, &rfv, |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 950 | len, 0, &ipc6, &fl6, (struct rt6_info *)dst, |
Willem de Bruijn | 5fdaa88 | 2018-07-06 10:12:57 -0400 | [diff] [blame] | 951 | msg->msg_flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 952 | |
| 953 | if (err) |
| 954 | ip6_flush_pending_frames(sk); |
| 955 | else if (!(msg->msg_flags & MSG_MORE)) |
David S. Miller | 4c9483b | 2011-03-12 16:22:43 -0500 | [diff] [blame] | 956 | err = rawv6_push_pending_frames(sk, &fl6, rp); |
YOSHIFUJI Hideaki | 3ef9d94 | 2007-09-14 16:45:40 -0700 | [diff] [blame] | 957 | release_sock(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 958 | } |
| 959 | done: |
Nicolas DICHTEL | 6d3e85e | 2006-02-13 15:56:13 -0800 | [diff] [blame] | 960 | dst_release(dst); |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 961 | out: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 962 | fl6_sock_release(flowlabel); |
Eric Dumazet | 45f6fad | 2015-11-29 19:37:57 -0800 | [diff] [blame] | 963 | txopt_put(opt_to_free); |
Ian Morris | 67ba415 | 2014-08-24 21:53:10 +0100 | [diff] [blame] | 964 | return err < 0 ? err : len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 965 | do_confirm: |
Julian Anastasov | 0dec879 | 2017-02-06 23:14:16 +0200 | [diff] [blame] | 966 | if (msg->msg_flags & MSG_PROBE) |
| 967 | dst_confirm_neigh(dst, &fl6.daddr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 968 | if (!(msg->msg_flags & MSG_PROBE) || len) |
| 969 | goto back_from_confirm; |
| 970 | err = 0; |
| 971 | goto done; |
| 972 | } |
| 973 | |
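| | /* |
| |  * ICMPV6_FILTER installs a 256-bit per-type bitmap (struct icmp6_filter) |
| |  * selecting which ICMPv6 messages this raw socket receives.  A rough |
| |  * userspace sketch, using the RFC 3542 names from <netinet/icmp6.h> |
| |  * (where the option is spelled ICMP6_FILTER): |
| |  * |
| |  *	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6); |
| |  *	struct icmp6_filter f; |
| |  * |
| |  *	ICMP6_FILTER_SETBLOCKALL(&f); |
| |  *	ICMP6_FILTER_SETPASS(ICMP6_ECHO_REPLY, &f); |
| |  *	setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER, &f, sizeof(f)); |
| |  */ |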
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 974 | static int rawv6_seticmpfilter(struct sock *sk, int level, int optname, |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 975 | sockptr_t optval, int optlen) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 976 | { |
| 977 | switch (optname) { |
| 978 | case ICMPV6_FILTER: |
| 979 | if (optlen > sizeof(struct icmp6_filter)) |
| 980 | optlen = sizeof(struct icmp6_filter); |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 981 | if (copy_from_sockptr(&raw6_sk(sk)->filter, optval, optlen)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 982 | return -EFAULT; |
| 983 | return 0; |
| 984 | default: |
| 985 | return -ENOPROTOOPT; |
Stephen Hemminger | 3ff50b7 | 2007-04-20 17:09:22 -0700 | [diff] [blame] | 986 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 987 | |
| 988 | return 0; |
| 989 | } |
| 990 | |
YOSHIFUJI Hideaki | 1ab1457 | 2007-02-09 23:24:49 +0900 | [diff] [blame] | 991 | static int rawv6_geticmpfilter(struct sock *sk, int level, int optname, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 992 | char __user *optval, int __user *optlen) |
| 993 | { |
| 994 | int len; |
| 995 | |
| 996 | switch (optname) { |
| 997 | case ICMPV6_FILTER: |
| 998 | if (get_user(len, optlen)) |
| 999 | return -EFAULT; |
| 1000 | if (len < 0) |
| 1001 | return -EINVAL; |
| 1002 | if (len > sizeof(struct icmp6_filter)) |
| 1003 | len = sizeof(struct icmp6_filter); |
| 1004 | if (put_user(len, optlen)) |
| 1005 | return -EFAULT; |
| 1006 | if (copy_to_user(optval, &raw6_sk(sk)->filter, len)) |
| 1007 | return -EFAULT; |
| 1008 | return 0; |
| 1009 | default: |
| 1010 | return -ENOPROTOOPT; |
Stephen Hemminger | 3ff50b7 | 2007-04-20 17:09:22 -0700 | [diff] [blame] | 1011 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1012 | |
| 1013 | return 0; |
| 1014 | } |
| 1015 | |
| 1016 | |
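| | /* |
| |  * SOL_RAW / IPV6_HDRINCL / IPV6_CHECKSUM handling: IPV6_HDRINCL makes |
| |  * the socket expect caller-built IPv6 headers, while IPV6_CHECKSUM |
| |  * gives the (even) payload offset at which the kernel maintains a |
| |  * checksum; a negative value switches checksumming off. |
| |  */ |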
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1017 | static int do_rawv6_setsockopt(struct sock *sk, int level, int optname, |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1018 | sockptr_t optval, unsigned int optlen) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1019 | { |
| 1020 | struct raw6_sock *rp = raw6_sk(sk); |
| 1021 | int val; |
| 1022 | |
Tamir Duberstein | fb7bc92 | 2021-12-29 15:09:47 -0500 | [diff] [blame] | 1023 | if (optlen < sizeof(val)) |
| 1024 | return -EINVAL; |
| 1025 | |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1026 | if (copy_from_sockptr(&val, optval, sizeof(val))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 | return -EFAULT; |
| 1028 | |
| 1029 | switch (optname) { |
Hannes Frederic Sowa | 715f504 | 2015-12-16 17:22:47 +0100 | [diff] [blame] | 1030 | case IPV6_HDRINCL: |
| 1031 | if (sk->sk_type != SOCK_RAW) |
| 1032 | return -EINVAL; |
| 1033 | inet_sk(sk)->hdrincl = !!val; |
| 1034 | return 0; |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1035 | case IPV6_CHECKSUM: |
| 1036 | if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 && |
| 1037 | level == IPPROTO_IPV6) { |
| 1038 | /* |
| 1039 | * RFC 3542 says that the IPV6_CHECKSUM socket |
| 1040 | * option at the IPPROTO_IPV6 level is not |
| 1041 | * allowed on ICMPv6 sockets. |
| 1042 | * If you want to set it, use IPPROTO_RAW |
| 1043 | * level IPV6_CHECKSUM socket option |
| 1044 | * (Linux extension). |
| 1045 | */ |
| 1046 | return -EINVAL; |
| 1047 | } |
YOSHIFUJI Hideaki | 1a98d05 | 2008-04-24 21:30:38 -0700 | [diff] [blame] | 1048 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1049 | /* A positive odd offset would leave the 16-bit checksum field |
| 1050 | misaligned; RFC 2292bis agrees, so reject it. */ |
| 1051 | if (val > 0 && (val&1)) |
| 1052 | return -EINVAL; |
| 1053 | if (val < 0) { |
| 1054 | rp->checksum = 0; |
| 1055 | } else { |
| 1056 | rp->checksum = 1; |
| 1057 | rp->offset = val; |
| 1058 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1059 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1060 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1062 | default: |
| 1063 | return -ENOPROTOOPT; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | } |
| 1065 | } |
| 1066 | |
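| | /* |
| |  * Level dispatch: SOL_RAW is handled here, SOL_ICMPV6 only on ICMPv6 |
| |  * sockets, and of SOL_IPV6 only IPV6_CHECKSUM and IPV6_HDRINCL are |
| |  * ours; everything else goes to ipv6_setsockopt(). |
| |  */ |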
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1067 | static int rawv6_setsockopt(struct sock *sk, int level, int optname, |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1068 | sockptr_t optval, unsigned int optlen) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1069 | { |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1070 | switch (level) { |
| 1071 | case SOL_RAW: |
| 1072 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1073 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1074 | case SOL_ICMPV6: |
| 1075 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
| 1076 | return -EOPNOTSUPP; |
| 1077 | return rawv6_seticmpfilter(sk, level, optname, optval, optlen); |
| 1078 | case SOL_IPV6: |
Hannes Frederic Sowa | 715f504 | 2015-12-16 17:22:47 +0100 | [diff] [blame] | 1079 | if (optname == IPV6_CHECKSUM || |
| 1080 | optname == IPV6_HDRINCL) |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1081 | break; |
Joe Perches | a8eceea | 2020-03-12 15:50:22 -0700 | [diff] [blame] | 1082 | fallthrough; |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1083 | default: |
| 1084 | return ipv6_setsockopt(sk, level, optname, optval, optlen); |
Stephen Hemminger | 3ff50b7 | 2007-04-20 17:09:22 -0700 | [diff] [blame] | 1085 | } |
| 1086 | |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1087 | return do_rawv6_setsockopt(sk, level, optname, optval, optlen); |
| 1088 | } |
| 1089 | |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1090 | static int do_rawv6_getsockopt(struct sock *sk, int level, int optname, |
| 1091 | char __user *optval, int __user *optlen) |
| 1092 | { |
| 1093 | struct raw6_sock *rp = raw6_sk(sk); |
| 1094 | int val, len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1095 | |
Ian Morris | 67ba415 | 2014-08-24 21:53:10 +0100 | [diff] [blame] | 1096 | if (get_user(len, optlen)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1097 | return -EFAULT; |
| 1098 | |
| 1099 | switch (optname) { |
Hannes Frederic Sowa | 715f504 | 2015-12-16 17:22:47 +0100 | [diff] [blame] | 1100 | case IPV6_HDRINCL: |
| 1101 | val = inet_sk(sk)->hdrincl; |
| 1102 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1103 | case IPV6_CHECKSUM: |
YOSHIFUJI Hideaki | 1a98d05 | 2008-04-24 21:30:38 -0700 | [diff] [blame] | 1104 | /* |
| 1105 | * We allow getsockopt() for the IPPROTO_IPV6-level |
| 1106 | * IPV6_CHECKSUM socket option on ICMPv6 sockets, |
| 1107 | * since RFC 3542 is silent about it. |
| 1108 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1109 | if (rp->checksum == 0) |
| 1110 | val = -1; |
| 1111 | else |
| 1112 | val = rp->offset; |
| 1113 | break; |
| 1114 | |
| 1115 | default: |
| 1116 | return -ENOPROTOOPT; |
| 1117 | } |
| 1118 | |
| 1119 | len = min_t(unsigned int, sizeof(int), len); |
| 1120 | |
| 1121 | if (put_user(len, optlen)) |
| 1122 | return -EFAULT; |
Ian Morris | 67ba415 | 2014-08-24 21:53:10 +0100 | [diff] [blame] | 1123 | if (copy_to_user(optval, &val, len)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | return -EFAULT; |
| 1125 | return 0; |
| 1126 | } |
| 1127 | |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1128 | static int rawv6_getsockopt(struct sock *sk, int level, int optname, |
| 1129 | char __user *optval, int __user *optlen) |
| 1130 | { |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1131 | switch (level) { |
| 1132 | case SOL_RAW: |
| 1133 | break; |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1134 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1135 | case SOL_ICMPV6: |
| 1136 | if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6) |
| 1137 | return -EOPNOTSUPP; |
| 1138 | return rawv6_geticmpfilter(sk, level, optname, optval, optlen); |
| 1139 | case SOL_IPV6: |
Hannes Frederic Sowa | 715f504 | 2015-12-16 17:22:47 +0100 | [diff] [blame] | 1140 | if (optname == IPV6_CHECKSUM || |
| 1141 | optname == IPV6_HDRINCL) |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1142 | break; |
Joe Perches | a8eceea | 2020-03-12 15:50:22 -0700 | [diff] [blame] | 1143 | fallthrough; |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1144 | default: |
| 1145 | return ipv6_getsockopt(sk, level, optname, optval, optlen); |
Stephen Hemminger | 3ff50b7 | 2007-04-20 17:09:22 -0700 | [diff] [blame] | 1146 | } |
| 1147 | |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1148 | return do_rawv6_getsockopt(sk, level, optname, optval, optlen); |
| 1149 | } |
| 1150 | |
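| | /* |
| |  * SIOCOUTQ reports the bytes still queued for transmit, SIOCINQ the |
| |  * length of the next pending datagram; other ioctls are handed to the |
| |  * IPv6 multicast-routing code when CONFIG_IPV6_MROUTE is enabled. |
| |  */ |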
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1151 | static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg) |
| 1152 | { |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1153 | switch (cmd) { |
| 1154 | case SIOCOUTQ: { |
| 1155 | int amount = sk_wmem_alloc_get(sk); |
Eric Dumazet | 31e6d36 | 2009-06-17 19:05:41 -0700 | [diff] [blame] | 1156 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1157 | return put_user(amount, (int __user *)arg); |
| 1158 | } |
| 1159 | case SIOCINQ: { |
| 1160 | struct sk_buff *skb; |
| 1161 | int amount = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1162 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1163 | spin_lock_bh(&sk->sk_receive_queue.lock); |
| 1164 | skb = skb_peek(&sk->sk_receive_queue); |
Ian Morris | 53b24b8 | 2015-03-29 14:00:05 +0100 | [diff] [blame] | 1165 | if (skb) |
Jamie Bainbridge | 105f552 | 2017-04-26 10:43:27 +1000 | [diff] [blame] | 1166 | amount = skb->len; |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1167 | spin_unlock_bh(&sk->sk_receive_queue.lock); |
| 1168 | return put_user(amount, (int __user *)arg); |
| 1169 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1170 | |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1171 | default: |
YOSHIFUJI Hideaki | 7bc570c | 2008-04-03 09:22:53 +0900 | [diff] [blame] | 1172 | #ifdef CONFIG_IPV6_MROUTE |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1173 | return ip6mr_ioctl(sk, cmd, (void __user *)arg); |
YOSHIFUJI Hideaki | 7bc570c | 2008-04-03 09:22:53 +0900 | [diff] [blame] | 1174 | #else |
Joe Perches | 207ec0a | 2011-07-01 09:43:08 +0000 | [diff] [blame] | 1175 | return -ENOIOCTLCMD; |
YOSHIFUJI Hideaki | 7bc570c | 2008-04-03 09:22:53 +0900 | [diff] [blame] | 1176 | #endif |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1177 | } |
| 1178 | } |
| 1179 | |
David S. Miller | e2d5776 | 2011-02-03 17:59:32 -0800 | [diff] [blame] | 1180 | #ifdef CONFIG_COMPAT |
| 1181 | static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg) |
| 1182 | { |
| 1183 | switch (cmd) { |
| 1184 | case SIOCOUTQ: |
| 1185 | case SIOCINQ: |
| 1186 | return -ENOIOCTLCMD; |
| 1187 | default: |
| 1188 | #ifdef CONFIG_IPV6_MROUTE |
| 1189 | return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg)); |
| 1190 | #else |
| 1191 | return -ENOIOCTLCMD; |
| 1192 | #endif |
| 1193 | } |
| 1194 | } |
| 1195 | #endif |
| 1196 | |
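| | /* |
| |  * Closing an IPPROTO_RAW socket also drops any router-alert |
| |  * subscription (ip6_ra_control) and detaches it from ip6mr before the |
| |  * generic socket release. |
| |  */ |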
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1197 | static void rawv6_close(struct sock *sk, long timeout) |
| 1198 | { |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 1199 | if (inet_sk(sk)->inet_num == IPPROTO_RAW) |
Denis V. Lunev | 725a8ff | 2008-07-19 00:28:58 -0700 | [diff] [blame] | 1200 | ip6_ra_control(sk, -1); |
YOSHIFUJI Hideaki | 7bc570c | 2008-04-03 09:22:53 +0900 | [diff] [blame] | 1201 | ip6mr_sk_done(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1202 | sk_common_release(sk); |
| 1203 | } |
| 1204 | |
Brian Haley | 7d06b2e | 2008-06-14 17:04:49 -0700 | [diff] [blame] | 1205 | static void raw6_destroy(struct sock *sk) |
Denis V. Lunev | 22dd485 | 2008-06-04 15:16:12 -0700 | [diff] [blame] | 1206 | { |
| 1207 | lock_sock(sk); |
| 1208 | ip6_flush_pending_frames(sk); |
| 1209 | release_sock(sk); |
David S. Miller | f23d60d | 2008-06-12 14:47:58 -0700 | [diff] [blame] | 1210 | |
Brian Haley | 7d06b2e | 2008-06-14 17:04:49 -0700 | [diff] [blame] | 1211 | inet6_destroy_sock(sk); |
Denis V. Lunev | 22dd485 | 2008-06-04 15:16:12 -0700 | [diff] [blame] | 1212 | } |
| 1213 | |
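| | /* |
| |  * ICMPv6 and Mobility Header sockets get checksumming enabled by |
| |  * default, with the checksum field at offset 2 and 4 of their |
| |  * respective headers. |
| |  */ |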
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1214 | static int rawv6_init_sk(struct sock *sk) |
| 1215 | { |
Masahide NAKAMURA | f48d5ff | 2007-02-07 00:07:39 -0800 | [diff] [blame] | 1216 | struct raw6_sock *rp = raw6_sk(sk); |
| 1217 | |
Eric Dumazet | c720c7e8 | 2009-10-15 06:30:45 +0000 | [diff] [blame] | 1218 | switch (inet_sk(sk)->inet_num) { |
Masahide NAKAMURA | f48d5ff | 2007-02-07 00:07:39 -0800 | [diff] [blame] | 1219 | case IPPROTO_ICMPV6: |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1220 | rp->checksum = 1; |
| 1221 | rp->offset = 2; |
Masahide NAKAMURA | f48d5ff | 2007-02-07 00:07:39 -0800 | [diff] [blame] | 1222 | break; |
| 1223 | case IPPROTO_MH: |
| 1224 | rp->checksum = 1; |
| 1225 | rp->offset = 4; |
| 1226 | break; |
| 1227 | default: |
| 1228 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1229 | } |
Eric Dumazet | a02cec2 | 2010-09-22 20:43:57 +0000 | [diff] [blame] | 1230 | return 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1231 | } |
| 1232 | |
| 1233 | struct proto rawv6_prot = { |
Arnaldo Carvalho de Melo | 543d9cf | 2006-03-20 22:48:35 -0800 | [diff] [blame] | 1234 | .name = "RAWv6", |
| 1235 | .owner = THIS_MODULE, |
| 1236 | .close = rawv6_close, |
Denis V. Lunev | 22dd485 | 2008-06-04 15:16:12 -0700 | [diff] [blame] | 1237 | .destroy = raw6_destroy, |
Hannes Frederic Sowa | 82b276c | 2014-01-20 05:16:39 +0100 | [diff] [blame] | 1238 | .connect = ip6_datagram_connect_v6_only, |
Eric Dumazet | 286c72d | 2016-10-20 09:39:40 -0700 | [diff] [blame] | 1239 | .disconnect = __udp_disconnect, |
Arnaldo Carvalho de Melo | 543d9cf | 2006-03-20 22:48:35 -0800 | [diff] [blame] | 1240 | .ioctl = rawv6_ioctl, |
| 1241 | .init = rawv6_init_sk, |
Arnaldo Carvalho de Melo | 543d9cf | 2006-03-20 22:48:35 -0800 | [diff] [blame] | 1242 | .setsockopt = rawv6_setsockopt, |
| 1243 | .getsockopt = rawv6_getsockopt, |
| 1244 | .sendmsg = rawv6_sendmsg, |
| 1245 | .recvmsg = rawv6_recvmsg, |
| 1246 | .bind = rawv6_bind, |
| 1247 | .backlog_rcv = rawv6_rcv_skb, |
Pavel Emelyanov | fc8717b | 2008-03-22 16:56:51 -0700 | [diff] [blame] | 1248 | .hash = raw_hash_sk, |
| 1249 | .unhash = raw_unhash_sk, |
Arnaldo Carvalho de Melo | 543d9cf | 2006-03-20 22:48:35 -0800 | [diff] [blame] | 1250 | .obj_size = sizeof(struct raw6_sock), |
David Windsor | 8c2bc89 | 2017-08-24 16:49:14 -0700 | [diff] [blame] | 1251 | .useroffset = offsetof(struct raw6_sock, filter), |
| 1252 | .usersize = sizeof_field(struct raw6_sock, filter), |
Pavel Emelyanov | fc8717b | 2008-03-22 16:56:51 -0700 | [diff] [blame] | 1253 | .h.raw_hash = &raw_v6_hashinfo, |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1254 | #ifdef CONFIG_COMPAT |
David S. Miller | e2d5776 | 2011-02-03 17:59:32 -0800 | [diff] [blame] | 1255 | .compat_ioctl = compat_rawv6_ioctl, |
Dmitry Mishin | 3fdadf7 | 2006-03-20 22:45:21 -0800 | [diff] [blame] | 1256 | #endif |
Cyrill Gorcunov | 432490f | 2016-10-21 13:03:44 +0300 | [diff] [blame] | 1257 | .diag_destroy = raw_abort, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1258 | }; |
| 1259 | |
| 1260 | #ifdef CONFIG_PROC_FS |
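| | /* /proc/net/raw6: one line per bound raw socket, using the common |
| |  * IPv6 datagram column layout (IPV6_SEQ_DGRAM_HEADER). |
| |  */ |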
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | static int raw6_seq_show(struct seq_file *seq, void *v) |
| 1262 | { |
Lorenzo Colitti | 17ef66af | 2013-05-31 15:05:48 +0000 | [diff] [blame] | 1263 | if (v == SEQ_START_TOKEN) { |
| 1264 | seq_puts(seq, IPV6_SEQ_DGRAM_HEADER); |
| 1265 | } else { |
| 1266 | struct sock *sp = v; |
| 1267 | __u16 srcp = inet_sk(sp)->inet_num; |
| 1268 | ip6_dgram_sock_seq_show(seq, v, srcp, 0, |
| 1269 | raw_seq_private(seq)->bucket); |
| 1270 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1271 | return 0; |
| 1272 | } |
| 1273 | |
Philippe De Muyter | 56b3d97 | 2007-07-10 23:07:31 -0700 | [diff] [blame] | 1274 | static const struct seq_operations raw6_seq_ops = { |
Pavel Emelyanov | 42a7380 | 2007-11-19 22:38:33 -0800 | [diff] [blame] | 1275 | .start = raw_seq_start, |
| 1276 | .next = raw_seq_next, |
| 1277 | .stop = raw_seq_stop, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1278 | .show = raw6_seq_show, |
| 1279 | }; |
| 1280 | |
Alexey Dobriyan | 2c8c1e7 | 2010-01-17 03:35:32 +0000 | [diff] [blame] | 1281 | static int __net_init raw6_init_net(struct net *net) |
Pavel Emelyanov | a308da1 | 2008-01-14 05:36:50 -0800 | [diff] [blame] | 1282 | { |
Christoph Hellwig | c350637 | 2018-04-10 19:42:55 +0200 | [diff] [blame] | 1283 | if (!proc_create_net_data("raw6", 0444, net->proc_net, &raw6_seq_ops, |
| 1284 | sizeof(struct raw_iter_state), &raw_v6_hashinfo)) |
Pavel Emelyanov | a308da1 | 2008-01-14 05:36:50 -0800 | [diff] [blame] | 1285 | return -ENOMEM; |
| 1286 | |
| 1287 | return 0; |
| 1288 | } |
| 1289 | |
Alexey Dobriyan | 2c8c1e7 | 2010-01-17 03:35:32 +0000 | [diff] [blame] | 1290 | static void __net_exit raw6_exit_net(struct net *net) |
Pavel Emelyanov | a308da1 | 2008-01-14 05:36:50 -0800 | [diff] [blame] | 1291 | { |
Gao feng | ece31ff | 2013-02-18 01:34:56 +0000 | [diff] [blame] | 1292 | remove_proc_entry("raw6", net->proc_net); |
Pavel Emelyanov | a308da1 | 2008-01-14 05:36:50 -0800 | [diff] [blame] | 1293 | } |
| 1294 | |
| 1295 | static struct pernet_operations raw6_net_ops = { |
| 1296 | .init = raw6_init_net, |
| 1297 | .exit = raw6_exit_net, |
| 1298 | }; |
| 1299 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1300 | int __init raw6_proc_init(void) |
| 1301 | { |
Pavel Emelyanov | a308da1 | 2008-01-14 05:36:50 -0800 | [diff] [blame] | 1302 | return register_pernet_subsys(&raw6_net_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | } |
| 1304 | |
| 1305 | void raw6_proc_exit(void) |
| 1306 | { |
Pavel Emelyanov | a308da1 | 2008-01-14 05:36:50 -0800 | [diff] [blame] | 1307 | unregister_pernet_subsys(&raw6_net_ops); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | } |
| 1309 | #endif /* CONFIG_PROC_FS */ |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1310 | |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 1311 | /* Same as inet6_dgram_ops, sans udp_poll. */ |
Eric Dumazet | 77d4b1d | 2017-06-03 09:29:25 -0700 | [diff] [blame] | 1312 | const struct proto_ops inet6_sockraw_ops = { |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1313 | .family = PF_INET6, |
| 1314 | .owner = THIS_MODULE, |
| 1315 | .release = inet6_release, |
| 1316 | .bind = inet6_bind, |
| 1317 | .connect = inet_dgram_connect, /* ok */ |
| 1318 | .socketpair = sock_no_socketpair, /* does nothing */ |
| 1319 | .accept = sock_no_accept, /* does nothing */ |
| 1320 | .getname = inet6_getname, |
Linus Torvalds | a11e1d4 | 2018-06-28 09:43:44 -0700 | [diff] [blame] | 1321 | .poll = datagram_poll, /* ok */ |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1322 | .ioctl = inet6_ioctl, /* must change */ |
Arnd Bergmann | c7cbdbf | 2019-04-17 22:51:48 +0200 | [diff] [blame] | 1323 | .gettstamp = sock_gettstamp, |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1324 | .listen = sock_no_listen, /* ok */ |
| 1325 | .shutdown = inet_shutdown, /* ok */ |
| 1326 | .setsockopt = sock_common_setsockopt, /* ok */ |
| 1327 | .getsockopt = sock_common_getsockopt, /* ok */ |
| 1328 | .sendmsg = inet_sendmsg, /* ok */ |
| 1329 | .recvmsg = sock_common_recvmsg, /* ok */ |
| 1330 | .mmap = sock_no_mmap, |
| 1331 | .sendpage = sock_no_sendpage, |
| 1332 | #ifdef CONFIG_COMPAT |
Christoph Hellwig | 3986912 | 2020-05-18 08:28:06 +0200 | [diff] [blame] | 1333 | .compat_ioctl = inet6_compat_ioctl, |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1334 | #endif |
| 1335 | }; |
| 1336 | |
| 1337 | static struct inet_protosw rawv6_protosw = { |
| 1338 | .type = SOCK_RAW, |
| 1339 | .protocol = IPPROTO_IP, /* wild card */ |
| 1340 | .prot = &rawv6_prot, |
| 1341 | .ops = &inet6_sockraw_ops, |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1342 | .flags = INET_PROTOSW_REUSE, |
| 1343 | }; |
| 1344 | |
| 1345 | int __init rawv6_init(void) |
| 1346 | { |
Julia Lawall | 3d2f6d4 | 2015-05-28 23:02:17 +0200 | [diff] [blame] | 1347 | return inet6_register_protosw(&rawv6_protosw); |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1348 | } |
| 1349 | |
Daniel Lezcano | 09f7709 | 2007-12-13 05:34:58 -0800 | [diff] [blame] | 1350 | void rawv6_exit(void) |
Daniel Lezcano | 7f4e486 | 2007-12-11 02:25:35 -0800 | [diff] [blame] | 1351 | { |
| 1352 | inet6_unregister_protosw(&rawv6_protosw); |
| 1353 | } |