/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 */

#ifndef _NET_IPV6_H
#define _NET_IPV6_H

#include <linux/ipv6.h>
#include <linux/hardirq.h>
#include <linux/jhash.h>
#include <linux/refcount.h>
#include <linux/jump_label_ratelimit.h>
#include <net/if_inet6.h>
#include <net/ndisc.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
#include <net/snmp.h>
#include <net/netns/hash.h>

#define SIN6_LEN_RFC2133	24

#define IPV6_MAXPLEN		65535

/*
 *	NextHeader field of IPv6 header
 */

#define NEXTHDR_HOP		0	/* Hop-by-hop option header. */
#define NEXTHDR_TCP		6	/* TCP segment. */
#define NEXTHDR_UDP		17	/* UDP message. */
#define NEXTHDR_IPV6		41	/* IPv6 in IPv6 */
#define NEXTHDR_ROUTING		43	/* Routing header. */
#define NEXTHDR_FRAGMENT	44	/* Fragmentation/reassembly header. */
#define NEXTHDR_GRE		47	/* GRE header. */
#define NEXTHDR_ESP		50	/* Encapsulating security payload. */
#define NEXTHDR_AUTH		51	/* Authentication header. */
#define NEXTHDR_ICMP		58	/* ICMP for IPv6. */
#define NEXTHDR_NONE		59	/* No next header */
#define NEXTHDR_DEST		60	/* Destination options header. */
#define NEXTHDR_SCTP		132	/* SCTP message. */
#define NEXTHDR_MOBILITY	135	/* Mobility header. */

#define NEXTHDR_MAX		255

#define IPV6_DEFAULT_HOPLIMIT	64
#define IPV6_DEFAULT_MCASTHOPS	1

/* Limits on Hop-by-Hop and Destination options.
 *
 * Per RFC8200 there is no limit on the maximum number or lengths of options in
 * Hop-by-Hop or Destination options other than that the packet must fit in an
 * MTU. We allow configurable limits in order to mitigate potential denial of
 * service attacks.
 *
 * There are three limits that may be set:
 *   - Limit the number of options in a Hop-by-Hop or Destination options
 *     extension header
 *   - Limit the byte length of a Hop-by-Hop or Destination options extension
 *     header
 *   - Disallow unknown options
 *
 * The limits are expressed in corresponding sysctls:
 *
 * ipv6.sysctl.max_dst_opts_cnt
 * ipv6.sysctl.max_hbh_opts_cnt
 * ipv6.sysctl.max_dst_opts_len
 * ipv6.sysctl.max_hbh_opts_len
 *
 * max_*_opts_cnt is the number of TLVs that are allowed for Destination
 * options or Hop-by-Hop options. If the number is less than zero then unknown
 * TLVs are disallowed and the number of known options that are allowed is the
 * absolute value. Setting the value to INT_MAX indicates no limit.
 *
 * max_*_opts_len is the length limit in bytes of a Destination or
 * Hop-by-Hop options extension header. Setting the value to INT_MAX
 * indicates no length limit.
 *
 * If a limit is exceeded when processing an extension header the packet is
 * silently discarded.
 */

/* Default limits for Hop-by-Hop and Destination options */
#define IP6_DEFAULT_MAX_DST_OPTS_CNT	8
#define IP6_DEFAULT_MAX_HBH_OPTS_CNT	8
#define IP6_DEFAULT_MAX_DST_OPTS_LEN	INT_MAX	/* No limit */
#define IP6_DEFAULT_MAX_HBH_OPTS_LEN	INT_MAX	/* No limit */
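
/* Minimal sketch (an illustration only, not the in-tree implementation) of
 * how the sign convention of max_*_opts_cnt described above can be applied
 * while walking TLVs; the helper name and arguments are hypothetical:
 *
 *	static bool ip6_tlv_count_ok(int max_cnt, int tlv_cnt, bool tlv_known)
 *	{
 *		if (max_cnt < 0)	// negative: only known TLVs are allowed
 *			return tlv_known && tlv_cnt <= -max_cnt;
 *		return tlv_cnt <= max_cnt;	// otherwise: any TLVs up to the limit
 *	}
 */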

/*
 *	Addr type
 *
 *	type	-	unicast | multicast
 *	scope	-	local	| site	    | global
 *	v4	-	compat
 *	v4mapped
 *	any
 *	loopback
 */

#define IPV6_ADDR_ANY		0x0000U

#define IPV6_ADDR_UNICAST	0x0001U
#define IPV6_ADDR_MULTICAST	0x0002U

#define IPV6_ADDR_LOOPBACK	0x0010U
#define IPV6_ADDR_LINKLOCAL	0x0020U
#define IPV6_ADDR_SITELOCAL	0x0040U

#define IPV6_ADDR_COMPATv4	0x0080U

#define IPV6_ADDR_SCOPE_MASK	0x00f0U

#define IPV6_ADDR_MAPPED	0x1000U

/*
 *	Addr scopes
 */
#define IPV6_ADDR_MC_SCOPE(a)	\
	((a)->s6_addr[1] & 0x0f)	/* nonstandard */
#define __IPV6_ADDR_SCOPE_INVALID	-1
#define IPV6_ADDR_SCOPE_NODELOCAL	0x01
#define IPV6_ADDR_SCOPE_LINKLOCAL	0x02
#define IPV6_ADDR_SCOPE_SITELOCAL	0x05
#define IPV6_ADDR_SCOPE_ORGLOCAL	0x08
#define IPV6_ADDR_SCOPE_GLOBAL		0x0e

/*
 *	Addr flags
 */
#define IPV6_ADDR_MC_FLAG_TRANSIENT(a)	\
	((a)->s6_addr[1] & 0x10)
#define IPV6_ADDR_MC_FLAG_PREFIX(a)	\
	((a)->s6_addr[1] & 0x20)
#define IPV6_ADDR_MC_FLAG_RENDEZVOUS(a)	\
	((a)->s6_addr[1] & 0x40)

/*
 *	fragmentation header
 */

struct frag_hdr {
	__u8	nexthdr;
	__u8	reserved;
	__be16	frag_off;
	__be32	identification;
};

#define IP6_MF		0x0001
#define IP6_OFFSET	0xFFF8

struct ip6_fraglist_iter {
	struct ipv6hdr	*tmp_hdr;
	struct sk_buff	*frag;
	int		offset;
	unsigned int	hlen;
	__be32		frag_id;
	u8		nexthdr;
};

int ip6_fraglist_init(struct sk_buff *skb, unsigned int hlen, u8 *prevhdr,
		      u8 nexthdr, __be32 frag_id,
		      struct ip6_fraglist_iter *iter);
void ip6_fraglist_prepare(struct sk_buff *skb, struct ip6_fraglist_iter *iter);

static inline struct sk_buff *ip6_fraglist_next(struct ip6_fraglist_iter *iter)
{
	struct sk_buff *skb = iter->frag;

	iter->frag = skb->next;
	skb_mark_not_on_list(skb);

	return skb;
}
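
/* Simplified sketch of how an output path might drive the fraglist iterator
 * above (illustrative only; the real fragmentation code performs far more
 * checking, and "output", "hlen", "prevhdr", "nexthdr" and "frag_id" here are
 * just placeholders for the caller's own state):
 *
 *	struct ip6_fraglist_iter iter;
 *	int err;
 *
 *	err = ip6_fraglist_init(skb, hlen, prevhdr, nexthdr, frag_id, &iter);
 *	if (err < 0)
 *		goto slow_path;
 *
 *	for (;;) {
 *		// Prepare the next fragment's headers before sending this one.
 *		if (iter.frag)
 *			ip6_fraglist_prepare(skb, &iter);
 *
 *		err = output(net, sk, skb);
 *		if (err || !iter.frag)
 *			break;
 *
 *		skb = ip6_fraglist_next(&iter);
 *	}
 *
 *	kfree(iter.tmp_hdr);
 */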

struct ip6_frag_state {
	u8		*prevhdr;
	unsigned int	hlen;
	unsigned int	mtu;
	unsigned int	left;
	int		offset;
	int		ptr;
	int		hroom;
	int		troom;
	__be32		frag_id;
	u8		nexthdr;
};

void ip6_frag_init(struct sk_buff *skb, unsigned int hlen, unsigned int mtu,
		   unsigned short needed_tailroom, int hdr_room, u8 *prevhdr,
		   u8 nexthdr, __be32 frag_id, struct ip6_frag_state *state);
struct sk_buff *ip6_frag_next(struct sk_buff *skb,
			      struct ip6_frag_state *state);

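/* Companion sketch for the slow path, where each fragment is allocated one at
 * a time (again illustrative only, loosely following how an output routine
 * could use the state machine; "dev" and "output" stand in for caller state):
 *
 *	struct ip6_frag_state state;
 *	struct sk_buff *frag;
 *
 *	ip6_frag_init(skb, hlen, mtu, dev->needed_tailroom,
 *		      LL_RESERVED_SPACE(dev), prevhdr, nexthdr, frag_id,
 *		      &state);
 *
 *	while (state.left > 0) {
 *		frag = ip6_frag_next(skb, &state);
 *		if (IS_ERR(frag))
 *			return PTR_ERR(frag);
 *
 *		err = output(net, sk, frag);
 *		if (err)
 *			break;
 *	}
 */
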
#define IP6_REPLY_MARK(net, mark) \
	((net)->ipv6.sysctl.fwmark_reflect ? (mark) : 0)

#include <net/sock.h>

/* sysctls */
extern int sysctl_mld_max_msf;
extern int sysctl_mld_qrv;

#define _DEVINC(net, statname, mod, idev, field)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		mod##SNMP_INC_STATS64((_idev)->stats.statname, (field));\
	mod##SNMP_INC_STATS64((net)->mib.statname##_statistics, (field));\
})

/* per device counters are atomic_long_t */
#define _DEVINCATOMIC(net, statname, mod, idev, field)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
	mod##SNMP_INC_STATS((net)->mib.statname##_statistics, (field));\
})

/* per device and per net counters are atomic_long_t */
#define _DEVINC_ATOMIC_ATOMIC(net, statname, idev, field)		\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		SNMP_INC_STATS_ATOMIC_LONG((_idev)->stats.statname##dev, (field)); \
	SNMP_INC_STATS_ATOMIC_LONG((net)->mib.statname##_statistics, (field));\
})

#define _DEVADD(net, statname, mod, idev, field, val)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		mod##SNMP_ADD_STATS((_idev)->stats.statname, (field), (val)); \
	mod##SNMP_ADD_STATS((net)->mib.statname##_statistics, (field), (val));\
})

#define _DEVUPD(net, statname, mod, idev, field, val)			\
({									\
	struct inet6_dev *_idev = (idev);				\
	if (likely(_idev != NULL))					\
		mod##SNMP_UPD_PO_STATS((_idev)->stats.statname, field, (val)); \
	mod##SNMP_UPD_PO_STATS((net)->mib.statname##_statistics, field, (val));\
})

/* MIBs */

#define IP6_INC_STATS(net, idev, field)		\
		_DEVINC(net, ipv6, , idev, field)
#define __IP6_INC_STATS(net, idev, field)	\
		_DEVINC(net, ipv6, __, idev, field)
#define IP6_ADD_STATS(net, idev, field, val)	\
		_DEVADD(net, ipv6, , idev, field, val)
#define __IP6_ADD_STATS(net, idev, field, val)	\
		_DEVADD(net, ipv6, __, idev, field, val)
#define IP6_UPD_PO_STATS(net, idev, field, val)	\
		_DEVUPD(net, ipv6, , idev, field, val)
#define __IP6_UPD_PO_STATS(net, idev, field, val)	\
		_DEVUPD(net, ipv6, __, idev, field, val)
#define ICMP6_INC_STATS(net, idev, field)	\
		_DEVINCATOMIC(net, icmpv6, , idev, field)
#define __ICMP6_INC_STATS(net, idev, field)	\
		_DEVINCATOMIC(net, icmpv6, __, idev, field)

#define ICMP6MSGOUT_INC_STATS(net, idev, field)	\
	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field +256)
#define ICMP6MSGIN_INC_STATS(net, idev, field)	\
	_DEVINC_ATOMIC_ATOMIC(net, icmpv6msg, idev, field)

struct ip6_ra_chain {
	struct ip6_ra_chain	*next;
	struct sock		*sk;
	int			sel;
	void			(*destructor)(struct sock *);
};

extern struct ip6_ra_chain	*ip6_ra_chain;
extern rwlock_t ip6_ra_lock;

/*
 * This structure is prepared by the protocol when parsing ancillary data,
 * and is then passed to IPv6.
 */

struct ipv6_txoptions {
	refcount_t		refcnt;
	/* Length of this structure */
	int			tot_len;

	/* length of extension headers */

	__u16			opt_flen;	/* after fragment hdr */
	__u16			opt_nflen;	/* before fragment hdr */

	struct ipv6_opt_hdr	*hopopt;
	struct ipv6_opt_hdr	*dst0opt;
	struct ipv6_rt_hdr	*srcrt;		/* Routing Header */
	struct ipv6_opt_hdr	*dst1opt;
	struct rcu_head		rcu;
	/* Option buffer, as read by IPV6_PKTOPTIONS, starts here. */
};

/* flowlabel_reflect sysctl values */
enum flowlabel_reflect {
	FLOWLABEL_REFLECT_ESTABLISHED		= 1,
	FLOWLABEL_REFLECT_TCP_RESET		= 2,
	FLOWLABEL_REFLECT_ICMPV6_ECHO_REPLIES	= 4,
};

struct ip6_flowlabel {
	struct ip6_flowlabel __rcu *next;
	__be32			label;
	atomic_t		users;
	struct in6_addr		dst;
	struct ipv6_txoptions	*opt;
	unsigned long		linger;
	struct rcu_head		rcu;
	u8			share;
	union {
		struct pid *pid;
		kuid_t uid;
	} owner;
	unsigned long		lastuse;
	unsigned long		expires;
	struct net		*fl_net;
};

#define IPV6_FLOWINFO_MASK		cpu_to_be32(0x0FFFFFFF)
#define IPV6_FLOWLABEL_MASK		cpu_to_be32(0x000FFFFF)
#define IPV6_FLOWLABEL_STATELESS_FLAG	cpu_to_be32(0x00080000)

#define IPV6_TCLASS_MASK	(IPV6_FLOWINFO_MASK & ~IPV6_FLOWLABEL_MASK)
#define IPV6_TCLASS_SHIFT	20

struct ipv6_fl_socklist {
	struct ipv6_fl_socklist	__rcu	*next;
	struct ip6_flowlabel		*fl;
	struct rcu_head			rcu;
};

struct ipcm6_cookie {
	struct sockcm_cookie	sockc;
	__s16			hlimit;
	__s16			tclass;
	__s8			dontfrag;
	struct ipv6_txoptions	*opt;
	__u16			gso_size;
};

static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
{
	*ipc6 = (struct ipcm6_cookie) {
		.hlimit = -1,
		.tclass = -1,
		.dontfrag = -1,
	};
}

static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
				 const struct ipv6_pinfo *np)
{
	*ipc6 = (struct ipcm6_cookie) {
		.hlimit = -1,
		.tclass = np->tclass,
		.dontfrag = np->dontfrag,
	};
}

static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
	struct ipv6_txoptions *opt;

	rcu_read_lock();
	opt = rcu_dereference(np->opt);
	if (opt) {
		if (!refcount_inc_not_zero(&opt->refcnt))
			opt = NULL;
		else
			opt = rcu_pointer_handoff(opt);
	}
	rcu_read_unlock();
	return opt;
}

static inline void txopt_put(struct ipv6_txoptions *opt)
{
	if (opt && refcount_dec_and_test(&opt->refcnt))
		kfree_rcu(opt, rcu);
}
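
/* Sketch of the intended reference pattern (illustrative; the surrounding
 * code is hypothetical): txopt_get() takes a reference under RCU that the
 * caller may hold across sleeping operations, and txopt_put() drops it.
 *
 *	struct ipv6_txoptions *opt;
 *
 *	opt = txopt_get(inet6_sk(sk));	// may return NULL
 *	// ... build and send a packet using opt, possibly sleeping ...
 *	txopt_put(opt);			// NULL-safe
 */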

struct ip6_flowlabel *__fl6_sock_lookup(struct sock *sk, __be32 label);

extern struct static_key_false_deferred ipv6_flowlabel_exclusive;
static inline struct ip6_flowlabel *fl6_sock_lookup(struct sock *sk,
						    __be32 label)
{
	if (static_branch_unlikely(&ipv6_flowlabel_exclusive.key))
		return __fl6_sock_lookup(sk, label) ? : ERR_PTR(-ENOENT);

	return NULL;
}

struct ipv6_txoptions *fl6_merge_options(struct ipv6_txoptions *opt_space,
					 struct ip6_flowlabel *fl,
					 struct ipv6_txoptions *fopt);
void fl6_free_socklist(struct sock *sk);
int ipv6_flowlabel_opt(struct sock *sk, sockptr_t optval, int optlen);
int ipv6_flowlabel_opt_get(struct sock *sk, struct in6_flowlabel_req *freq,
			   int flags);
int ip6_flowlabel_init(void);
void ip6_flowlabel_cleanup(void);
bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np);

static inline void fl6_sock_release(struct ip6_flowlabel *fl)
{
	if (fl)
		atomic_dec(&fl->users);
}

void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info);

void icmpv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
				struct icmp6hdr *thdr, int len);

int ip6_ra_control(struct sock *sk, int sel);

int ipv6_parse_hopopts(struct sk_buff *skb);

struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
					struct ipv6_txoptions *opt);
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
					  struct ipv6_txoptions *opt,
					  int newtype,
					  struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
					  struct ipv6_txoptions *opt);

bool ipv6_opt_accepted(const struct sock *sk, const struct sk_buff *skb,
		       const struct inet6_skb_parm *opt);
struct ipv6_txoptions *ipv6_update_options(struct sock *sk,
					   struct ipv6_txoptions *opt);

static inline bool ipv6_accept_ra(struct inet6_dev *idev)
{
	/* If forwarding is enabled, RAs are not accepted unless the special
	 * hybrid mode (accept_ra=2) is enabled.
	 */
	return idev->cnf.forwarding ? idev->cnf.accept_ra == 2 :
	       idev->cnf.accept_ra;
}

#define IPV6_FRAG_HIGH_THRESH	(4 * 1024*1024)	/* 4194304 */
#define IPV6_FRAG_LOW_THRESH	(3 * 1024*1024)	/* 3145728 */
#define IPV6_FRAG_TIMEOUT	(60 * HZ)	/* 60 seconds */

int __ipv6_addr_type(const struct in6_addr *addr);
static inline int ipv6_addr_type(const struct in6_addr *addr)
{
	return __ipv6_addr_type(addr) & 0xffff;
}

static inline int ipv6_addr_scope(const struct in6_addr *addr)
{
	return __ipv6_addr_type(addr) & IPV6_ADDR_SCOPE_MASK;
}

static inline int __ipv6_addr_src_scope(int type)
{
	return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16);
}

static inline int ipv6_addr_src_scope(const struct in6_addr *addr)
{
	return __ipv6_addr_src_scope(__ipv6_addr_type(addr));
}

static inline bool __ipv6_addr_needs_scope_id(int type)
{
	return type & IPV6_ADDR_LINKLOCAL ||
	       (type & IPV6_ADDR_MULTICAST &&
		(type & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)));
}

static inline __u32 ipv6_iface_scope_id(const struct in6_addr *addr, int iface)
{
	return __ipv6_addr_needs_scope_id(__ipv6_addr_type(addr)) ? iface : 0;
}

static inline int ipv6_addr_cmp(const struct in6_addr *a1, const struct in6_addr *a2)
{
	return memcmp(a1, a2, sizeof(struct in6_addr));
}

static inline bool
ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m,
		     const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ulm = (const unsigned long *)m;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return !!(((ul1[0] ^ ul2[0]) & ulm[0]) |
		  ((ul1[1] ^ ul2[1]) & ulm[1]));
#else
	return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) |
		  ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) |
		  ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) |
		  ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]));
#endif
}

static inline void ipv6_addr_prefix(struct in6_addr *pfx,
				    const struct in6_addr *addr,
				    int plen)
{
	/* caller must guarantee 0 <= plen <= 128 */
	int o = plen >> 3,
	    b = plen & 0x7;

	memset(pfx->s6_addr, 0, sizeof(pfx->s6_addr));
	memcpy(pfx->s6_addr, addr, o);
	if (b != 0)
		pfx->s6_addr[o] = addr->s6_addr[o] & (0xff00 >> b);
}
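
/* Worked example (illustrative only): extracting the /64 prefix of an
 * address with the helper above.
 *
 *	struct in6_addr pfx;
 *
 *	ipv6_addr_prefix(&pfx, &addr, 64);
 *	// pfx now holds the first 64 bits of addr with the rest zeroed,
 *	// e.g. 2001:db8:1:2:aaaa:bbbb:cccc:dddd -> 2001:db8:1:2::
 */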

static inline void ipv6_addr_prefix_copy(struct in6_addr *addr,
					 const struct in6_addr *pfx,
					 int plen)
{
	/* caller must guarantee 0 <= plen <= 128 */
	int o = plen >> 3,
	    b = plen & 0x7;

	memcpy(addr->s6_addr, pfx, o);
	if (b != 0) {
		addr->s6_addr[o] &= ~(0xff00 >> b);
		addr->s6_addr[o] |= (pfx->s6_addr[o] & (0xff00 >> b));
	}
}

static inline void __ipv6_addr_set_half(__be32 *addr,
					__be32 wh, __be32 wl)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
#if defined(__BIG_ENDIAN)
	if (__builtin_constant_p(wh) && __builtin_constant_p(wl)) {
		*(__force u64 *)addr = ((__force u64)(wh) << 32 | (__force u64)(wl));
		return;
	}
#elif defined(__LITTLE_ENDIAN)
	if (__builtin_constant_p(wl) && __builtin_constant_p(wh)) {
		*(__force u64 *)addr = ((__force u64)(wl) << 32 | (__force u64)(wh));
		return;
	}
#endif
#endif
	addr[0] = wh;
	addr[1] = wl;
}

static inline void ipv6_addr_set(struct in6_addr *addr,
				 __be32 w1, __be32 w2,
				 __be32 w3, __be32 w4)
{
	__ipv6_addr_set_half(&addr->s6_addr32[0], w1, w2);
	__ipv6_addr_set_half(&addr->s6_addr32[2], w3, w4);
}

static inline bool ipv6_addr_equal(const struct in6_addr *a1,
				   const struct in6_addr *a2)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul1 = (const unsigned long *)a1;
	const unsigned long *ul2 = (const unsigned long *)a2;

	return ((ul1[0] ^ ul2[0]) | (ul1[1] ^ ul2[1])) == 0UL;
#else
	return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) |
		(a1->s6_addr32[1] ^ a2->s6_addr32[1]) |
		(a1->s6_addr32[2] ^ a2->s6_addr32[2]) |
		(a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0;
#endif
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline bool __ipv6_prefix_equal64_half(const __be64 *a1,
					      const __be64 *a2,
					      unsigned int len)
{
	if (len && ((*a1 ^ *a2) & cpu_to_be64((~0UL) << (64 - len))))
		return false;
	return true;
}

static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
				     const struct in6_addr *addr2,
				     unsigned int prefixlen)
{
	const __be64 *a1 = (const __be64 *)addr1;
	const __be64 *a2 = (const __be64 *)addr2;

	if (prefixlen >= 64) {
		if (a1[0] ^ a2[0])
			return false;
		return __ipv6_prefix_equal64_half(a1 + 1, a2 + 1, prefixlen - 64);
	}
	return __ipv6_prefix_equal64_half(a1, a2, prefixlen);
}
#else
static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
				     const struct in6_addr *addr2,
				     unsigned int prefixlen)
{
	const __be32 *a1 = addr1->s6_addr32;
	const __be32 *a2 = addr2->s6_addr32;
	unsigned int pdw, pbi;

	/* check complete u32 in prefix */
	pdw = prefixlen >> 5;
	if (pdw && memcmp(a1, a2, pdw << 2))
		return false;

	/* check incomplete u32 in prefix */
	pbi = prefixlen & 0x1f;
	if (pbi && ((a1[pdw] ^ a2[pdw]) & htonl((0xffffffff) << (32 - pbi))))
		return false;

	return true;
}
#endif

static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul = (const unsigned long *)a;

	return (ul[0] | ul[1]) == 0UL;
#else
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | a->s6_addr32[3]) == 0;
#endif
}

static inline u32 ipv6_addr_hash(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const unsigned long *ul = (const unsigned long *)a;
	unsigned long x = ul[0] ^ ul[1];

	return (u32)(x ^ (x >> 32));
#else
	return (__force u32)(a->s6_addr32[0] ^ a->s6_addr32[1] ^
			     a->s6_addr32[2] ^ a->s6_addr32[3]);
#endif
}

/* a more secure version of ipv6_addr_hash() */
static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
{
	u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1];

	return jhash_3words(v,
			    (__force u32)a->s6_addr32[2],
			    (__force u32)a->s6_addr32[3],
			    initval);
}

static inline bool ipv6_addr_loopback(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	const __be64 *be = (const __be64 *)a;

	return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
#else
	return (a->s6_addr32[0] | a->s6_addr32[1] |
		a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
#endif
}

/*
 * Note that we must __force cast these to unsigned long to make sparse happy,
 * since all of the endian-annotated types are fixed size regardless of arch.
 */
static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
{
	return (
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
		*(unsigned long *)a |
#else
		(__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
#endif
		(__force unsigned long)(a->s6_addr32[2] ^
					cpu_to_be32(0x0000ffff))) == 0UL;
}

static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
{
	return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
}

static inline u32 ipv6_portaddr_hash(const struct net *net,
				     const struct in6_addr *addr6,
				     unsigned int port)
{
	unsigned int hash, mix = net_hash_mix(net);

	if (ipv6_addr_any(addr6))
		hash = jhash_1word(0, mix);
	else if (ipv6_addr_v4mapped(addr6))
		hash = jhash_1word((__force u32)addr6->s6_addr32[3], mix);
	else
		hash = jhash2((__force u32 *)addr6->s6_addr32, 4, mix);

	return hash ^ port;
}

/*
 * Check for an RFC 4843 ORCHID address
 * (Overlay Routable Cryptographic Hash Identifiers)
 */
static inline bool ipv6_addr_orchid(const struct in6_addr *a)
{
	return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010);
}

static inline bool ipv6_addr_is_multicast(const struct in6_addr *addr)
{
	return (addr->s6_addr32[0] & htonl(0xFF000000)) == htonl(0xFF000000);
}

static inline void ipv6_addr_set_v4mapped(const __be32 addr,
					  struct in6_addr *v4mapped)
{
	ipv6_addr_set(v4mapped,
		      0, 0,
		      htonl(0x0000FFFF),
		      addr);
}
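
/* Example (illustrative): building the IPv4-mapped form of 192.0.2.1.
 *
 *	struct in6_addr mapped;
 *
 *	ipv6_addr_set_v4mapped(htonl(0xc0000201), &mapped);
 *	// mapped is now ::ffff:192.0.2.1, and ipv6_addr_v4mapped(&mapped)
 *	// returns true.
 */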

/*
 * find the first different bit between two addresses
 * length of address must be a multiple of 32 bits
 */
static inline int __ipv6_addr_diff32(const void *token1, const void *token2, int addrlen)
{
	const __be32 *a1 = token1, *a2 = token2;
	int i;

	addrlen >>= 2;

	for (i = 0; i < addrlen; i++) {
		__be32 xb = a1[i] ^ a2[i];
		if (xb)
			return i * 32 + 31 - __fls(ntohl(xb));
	}

	/*
	 *	we should *never* get to this point since that
	 *	would mean the addrs are equal
	 *
	 *	However, we do get to it 8) And exactly, when
	 *	addresses are equal 8)
	 *
	 *	ip route add 1111::/128 via ...
	 *	ip route add 1111::/64 via ...
	 *	and we are here.
	 *
	 *	Ideally, this function should stop comparison
	 *	at prefix length. It does not, but it is still OK,
	 *	if returned value is greater than prefix length.
	 *					--ANK (980803)
	 */
	return addrlen << 5;
}

#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
static inline int __ipv6_addr_diff64(const void *token1, const void *token2, int addrlen)
{
	const __be64 *a1 = token1, *a2 = token2;
	int i;

	addrlen >>= 3;

	for (i = 0; i < addrlen; i++) {
		__be64 xb = a1[i] ^ a2[i];
		if (xb)
			return i * 64 + 63 - __fls(be64_to_cpu(xb));
	}

	return addrlen << 6;
}
#endif

static inline int __ipv6_addr_diff(const void *token1, const void *token2, int addrlen)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
	if (__builtin_constant_p(addrlen) && !(addrlen & 7))
		return __ipv6_addr_diff64(token1, token2, addrlen);
#endif
	return __ipv6_addr_diff32(token1, token2, addrlen);
}

static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2)
{
	return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
}

__be32 ipv6_select_ident(struct net *net,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr);
__be32 ipv6_proxy_select_ident(struct net *net, struct sk_buff *skb);

int ip6_dst_hoplimit(struct dst_entry *dst);

static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
				      struct dst_entry *dst)
{
	int hlimit;

	if (ipv6_addr_is_multicast(&fl6->daddr))
		hlimit = np->mcast_hops;
	else
		hlimit = np->hop_limit;
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);
	return hlimit;
}

/* copy IPv6 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v6addrs.src = iph->saddr;
 *			flow->v6addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
					    const struct ipv6hdr *iph)
{
	BUILD_BUG_ON(offsetof(typeof(flow->addrs), v6addrs.dst) !=
		     offsetof(typeof(flow->addrs), v6addrs.src) +
		     sizeof(flow->addrs.v6addrs.src));
	memcpy(&flow->addrs.v6addrs, &iph->saddr, sizeof(flow->addrs.v6addrs));
	flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}

#if IS_ENABLED(CONFIG_IPV6)

static inline bool ipv6_can_nonlocal_bind(struct net *net,
					  struct inet_sock *inet)
{
	return net->ipv6.sysctl.ip_nonlocal_bind ||
		inet->freebind || inet->transparent;
}

/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF		0
#define IP6_AUTO_FLOW_LABEL_OPTOUT	1
#define IP6_AUTO_FLOW_LABEL_OPTIN	2
#define IP6_AUTO_FLOW_LABEL_FORCED	3

#define IP6_AUTO_FLOW_LABEL_MAX		IP6_AUTO_FLOW_LABEL_FORCED

#define IP6_DEFAULT_AUTO_FLOW_LABELS	IP6_AUTO_FLOW_LABEL_OPTOUT

static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
					__be32 flowlabel, bool autolabel,
					struct flowi6 *fl6)
{
	u32 hash;

	/* @flowlabel may include more than a flow label, e.g. the traffic class.
	 * Here we want only the flow label value.
	 */
	flowlabel &= IPV6_FLOWLABEL_MASK;

	if (flowlabel ||
	    net->ipv6.sysctl.auto_flowlabels == IP6_AUTO_FLOW_LABEL_OFF ||
	    (!autolabel &&
	     net->ipv6.sysctl.auto_flowlabels != IP6_AUTO_FLOW_LABEL_FORCED))
		return flowlabel;

	hash = skb_get_hash_flowi6(skb, fl6);

	/* Since this is being sent on the wire, obfuscate the hash a bit to
	 * minimize the possibility that any useful information is leaked to
	 * an attacker. Only the lower 20 bits are relevant.
	 */
	hash = rol32(hash, 16);

	flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;

	if (net->ipv6.sysctl.flowlabel_state_ranges)
		flowlabel |= IPV6_FLOWLABEL_STATELESS_FLAG;

	return flowlabel;
}

static inline int ip6_default_np_autolabel(struct net *net)
{
	switch (net->ipv6.sysctl.auto_flowlabels) {
	case IP6_AUTO_FLOW_LABEL_OFF:
	case IP6_AUTO_FLOW_LABEL_OPTIN:
	default:
		return 0;
	case IP6_AUTO_FLOW_LABEL_OPTOUT:
	case IP6_AUTO_FLOW_LABEL_FORCED:
		return 1;
	}
}
#else
static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
					__be32 flowlabel, bool autolabel,
					struct flowi6 *fl6)
{
	return flowlabel;
}
static inline int ip6_default_np_autolabel(struct net *net)
{
	return 0;
}
#endif

#if IS_ENABLED(CONFIG_IPV6)
static inline int ip6_multipath_hash_policy(const struct net *net)
{
	return net->ipv6.sysctl.multipath_hash_policy;
}
#else
static inline int ip6_multipath_hash_policy(const struct net *net)
{
	return 0;
}
#endif

/*
 *	Header manipulation
 */
static inline void ip6_flow_hdr(struct ipv6hdr *hdr, unsigned int tclass,
				__be32 flowlabel)
{
	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | flowlabel;
}

static inline __be32 ip6_flowinfo(const struct ipv6hdr *hdr)
{
	return *(__be32 *)hdr & IPV6_FLOWINFO_MASK;
}

static inline __be32 ip6_flowlabel(const struct ipv6hdr *hdr)
{
	return *(__be32 *)hdr & IPV6_FLOWLABEL_MASK;
}

static inline u8 ip6_tclass(__be32 flowinfo)
{
	return ntohl(flowinfo & IPV6_TCLASS_MASK) >> IPV6_TCLASS_SHIFT;
}

static inline __be32 ip6_make_flowinfo(unsigned int tclass, __be32 flowlabel)
{
	return htonl(tclass << IPV6_TCLASS_SHIFT) | flowlabel;
}
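
/* Worked example (illustrative): the first 32 bits of the IPv6 header carry
 * version (4 bits), traffic class (8 bits) and flow label (20 bits), so:
 *
 *	__be32 fi = ip6_make_flowinfo(0x2e, htonl(0x12345) & IPV6_FLOWLABEL_MASK);
 *	// ip6_tclass(fi) == 0x2e
 *	// fi & IPV6_FLOWLABEL_MASK is the 20-bit label 0x12345
 */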
| 963 | |
Michal Kubecek | fa1be7e | 2018-06-04 11:36:05 +0200 | [diff] [blame] | 964 | static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6) |
| 965 | { |
| 966 | return fl6->flowlabel & IPV6_FLOWLABEL_MASK; |
| 967 | } |
| 968 | |
YOSHIFUJI Hideaki / 吉藤英明 | 3e4e4c1 | 2013-01-13 05:01:39 +0000 | [diff] [blame] | 969 | /* |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 970 | * Prototypes exported by ipv6 |
| 971 | */ |
| 972 | |
| 973 | /* |
| 974 | * rcv function (called from netdevice level) |
| 975 | */ |
| 976 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 977 | int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, |
| 978 | struct packet_type *pt, struct net_device *orig_dev); |
Edward Cree | d8269e2 | 2018-07-05 15:49:42 +0100 | [diff] [blame] | 979 | void ipv6_list_rcv(struct list_head *head, struct packet_type *pt, |
| 980 | struct net_device *orig_dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 981 | |
Eric W. Biederman | 0c4b51f | 2015-09-15 20:04:18 -0500 | [diff] [blame] | 982 | int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb); |
Patrick McHardy | b05e106 | 2006-01-06 23:03:34 -0800 | [diff] [blame] | 983 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 984 | /* |
| 985 | * upper-layer output functions |
| 986 | */ |
Eric Dumazet | 1c1e9d2 | 2015-09-25 07:39:20 -0700 | [diff] [blame] | 987 | int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6, |
Eric Dumazet | 4f6570d | 2019-09-24 08:01:14 -0700 | [diff] [blame] | 988 | __u32 mark, struct ipv6_txoptions *opt, int tclass, u32 priority); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 989 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 990 | int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 991 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 992 | int ip6_append_data(struct sock *sk, |
| 993 | int getfrag(void *from, char *to, int offset, int len, |
| 994 | int odd, struct sk_buff *skb), |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 995 | void *from, int length, int transhdrlen, |
| 996 | struct ipcm6_cookie *ipc6, struct flowi6 *fl6, |
Willem de Bruijn | 5fdaa88 | 2018-07-06 10:12:57 -0400 | [diff] [blame] | 997 | struct rt6_info *rt, unsigned int flags); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 998 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 999 | int ip6_push_pending_frames(struct sock *sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1001 | void ip6_flush_pending_frames(struct sock *sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1002 | |
Vlad Yasevich | 6422398 | 2015-01-31 10:40:15 -0500 | [diff] [blame] | 1003 | int ip6_send_skb(struct sk_buff *skb); |
| 1004 | |
| 1005 | struct sk_buff *__ip6_make_skb(struct sock *sk, struct sk_buff_head *queue, |
| 1006 | struct inet_cork_full *cork, |
| 1007 | struct inet6_cork *v6_cork); |
| 1008 | struct sk_buff *ip6_make_skb(struct sock *sk, |
| 1009 | int getfrag(void *from, char *to, int offset, |
| 1010 | int len, int odd, struct sk_buff *skb), |
| 1011 | void *from, int length, int transhdrlen, |
Wei Wang | 26879da | 2016-05-02 21:40:07 -0700 | [diff] [blame] | 1012 | struct ipcm6_cookie *ipc6, struct flowi6 *fl6, |
| 1013 | struct rt6_info *rt, unsigned int flags, |
Willem de Bruijn | 5fdaa88 | 2018-07-06 10:12:57 -0400 | [diff] [blame] | 1014 | struct inet_cork_full *cork); |
Vlad Yasevich | 6422398 | 2015-01-31 10:40:15 -0500 | [diff] [blame] | 1015 | |
| 1016 | static inline struct sk_buff *ip6_finish_skb(struct sock *sk) |
| 1017 | { |
| 1018 | return __ip6_make_skb(sk, &sk->sk_write_queue, &inet_sk(sk)->cork, |
| 1019 | &inet6_sk(sk)->cork); |
| 1020 | } |
| 1021 | |
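/*
 * Hedged usage sketch (not from this file): the typical corked transmit
 * sequence built on ip6_append_data()/ip6_push_pending_frames(). The getfrag
 * callback ip_generic_getfrag() lives in net/ip.h, and the zero
 * transport-header length and helper name are assumptions for illustration;
 * real callers hold the socket lock across the sequence, as shown here.
 */
static inline int ip6_sketch_append_and_push(struct sock *sk,
					     struct msghdr *msg, int len,
					     struct ipcm6_cookie *ipc6,
					     struct flowi6 *fl6,
					     struct rt6_info *rt)
{
	int err;

	lock_sock(sk);
	err = ip6_append_data(sk, ip_generic_getfrag, msg, len, 0,
			      ipc6, fl6, rt, msg->msg_flags);
	if (err)
		ip6_flush_pending_frames(sk);
	else
		err = ip6_push_pending_frames(sk);
	release_sock(sk);
	return err;
}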
Roopa Prabhu | 343d60a | 2015-07-30 13:34:53 -0700 | [diff] [blame] | 1022 | int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst, |
| 1023 | struct flowi6 *fl6); |
Sabrina Dubroca | c4e85f7 | 2019-12-04 15:35:52 +0100 | [diff] [blame] | 1024 | struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6, |
Steffen Klassert | 0e0d44a | 2013-08-28 08:04:14 +0200 | [diff] [blame] | 1025 | const struct in6_addr *final_dst); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1026 | struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6, |
Alexey Kodanev | 9681815 | 2018-04-03 15:00:08 +0300 | [diff] [blame] | 1027 | const struct in6_addr *final_dst, |
| 1028 | bool connected); |
Martin Varghese | 571912c | 2020-02-24 10:57:50 +0530 | [diff] [blame] | 1029 | struct dst_entry *ip6_dst_lookup_tunnel(struct sk_buff *skb, |
| 1030 | struct net_device *dev, |
| 1031 | struct net *net, struct socket *sock, |
| 1032 | struct in6_addr *saddr, |
| 1033 | const struct ip_tunnel_info *info, |
| 1034 | u8 protocol, bool use_cache); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1035 | struct dst_entry *ip6_blackhole_route(struct net *net, |
| 1036 | struct dst_entry *orig_dst); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1037 | |
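/*
 * Hedged sketch: resolve a flow with ip6_dst_lookup_flow() and attach the
 * result to an skb. skb_dst_set() comes from net/dst.h; a NULL final_dst
 * simply leaves fl6->daddr untouched. The helper name and the minimal error
 * handling are illustrative, not an API defined here.
 */
static inline int ip6_sketch_route_output(struct net *net, struct sock *sk,
					  struct sk_buff *skb,
					  struct flowi6 *fl6)
{
	struct dst_entry *dst;

	dst = ip6_dst_lookup_flow(net, sk, fl6, NULL);
	if (IS_ERR(dst))
		return PTR_ERR(dst);

	/* the skb now owns the dst reference taken by the lookup */
	skb_dst_set(skb, dst);
	return 0;
}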
| 1038 | /* |
| 1039 | * skb processing functions |
| 1040 | */ |
| 1041 | |
Eric W. Biederman | ede2059 | 2015-10-07 16:48:47 -0500 | [diff] [blame] | 1042 | int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1043 | int ip6_forward(struct sk_buff *skb); |
| 1044 | int ip6_input(struct sk_buff *skb); |
| 1045 | int ip6_mc_input(struct sk_buff *skb); |
Paolo Abeni | 80bde36 | 2018-11-07 12:38:32 +0100 | [diff] [blame] | 1046 | void ip6_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int nexthdr, |
| 1047 | bool have_final); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1048 | |
Eric W. Biederman | cf91a99 | 2015-10-07 16:48:45 -0500 | [diff] [blame] | 1049 | int __ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
Eric W. Biederman | 33224b1 | 2015-10-07 16:48:46 -0500 | [diff] [blame] | 1050 | int ip6_local_out(struct net *net, struct sock *sk, struct sk_buff *skb); |
Herbert Xu | ef76bc2 | 2008-01-11 19:15:08 -0800 | [diff] [blame] | 1051 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1052 | /* |
| 1053 | * Extension header (options) processing |
| 1054 | */ |
| 1055 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1056 | void ipv6_push_nfrag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, |
David Lebrun | 613fa3c | 2016-11-08 14:59:20 +0100 | [diff] [blame] | 1057 | u8 *proto, struct in6_addr **daddr_p, |
| 1058 | struct in6_addr *saddr); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1059 | void ipv6_push_frag_opts(struct sk_buff *skb, struct ipv6_txoptions *opt, |
| 1060 | u8 *proto); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1061 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1062 | int ipv6_skip_exthdr(const struct sk_buff *, int start, u8 *nexthdrp, |
| 1063 | __be16 *frag_offp); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1064 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1065 | bool ipv6_ext_hdr(u8 nexthdr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1066 | |
Jesse Gross | f8f6267 | 2012-11-09 17:05:07 -0800 | [diff] [blame] | 1067 | enum { |
Ansis Atteka | 9195bb8 | 2012-11-09 17:11:31 -0800 | [diff] [blame] | 1068 | IP6_FH_F_FRAG = (1 << 0), |
| 1069 | IP6_FH_F_AUTH = (1 << 1), |
| 1070 | IP6_FH_F_SKIP_RH = (1 << 2), |
Jesse Gross | f8f6267 | 2012-11-09 17:05:07 -0800 | [diff] [blame] | 1071 | }; |
| 1072 | |
| 1073 | /* find specified header and get offset to it */ |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1074 | int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, int target, |
| 1075 | unsigned short *fragoff, int *fragflg); |
Jesse Gross | f8f6267 | 2012-11-09 17:05:07 -0800 | [diff] [blame] | 1076 | |
Huw Davies | 0868383 | 2016-06-27 15:06:15 -0400 | [diff] [blame] | 1077 | int ipv6_find_tlv(const struct sk_buff *skb, int offset, int type); |
Masahide NAKAMURA | c61a4043 | 2006-08-23 19:18:35 -0700 | [diff] [blame] | 1078 | |
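/*
 * Hedged sketch: use ipv6_find_hdr() to locate a transport header in a
 * received skb. A starting offset of 0 means "begin at the network header";
 * the return value is the header type found, or a negative errno. The helper
 * name is invented for this example.
 */
static inline int ip6_sketch_tcp_offset(const struct sk_buff *skb,
					unsigned int *thoff)
{
	int nexthdr;

	*thoff = 0;
	nexthdr = ipv6_find_hdr(skb, thoff, NEXTHDR_TCP, NULL, NULL);
	if (nexthdr < 0)
		return nexthdr;	/* e.g. -ENOENT when no TCP header exists */
	return 0;		/* *thoff now holds the TCP header offset */
}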
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1079 | struct in6_addr *fl6_update_dst(struct flowi6 *fl6, |
| 1080 | const struct ipv6_txoptions *opt, |
| 1081 | struct in6_addr *orig); |
Arnaud Ebalard | 20c59de | 2010-06-01 21:35:01 +0000 | [diff] [blame] | 1082 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1083 | /* |
| 1084 | * socket options (ipv6_sockglue.c) |
| 1085 | */ |
| 1086 | |
Christoph Hellwig | a7b75c5 | 2020-07-23 08:09:07 +0200 | [diff] [blame] | 1087 | int ipv6_setsockopt(struct sock *sk, int level, int optname, sockptr_t optval, |
| 1088 | unsigned int optlen); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1089 | int ipv6_getsockopt(struct sock *sk, int level, int optname, |
| 1090 | char __user *optval, int __user *optlen); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1091 | |
Guillaume Nault | 0382a25 | 2016-11-29 13:09:44 +0100 | [diff] [blame] | 1092 | int __ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, |
| 1093 | int addr_len); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1094 | int ip6_datagram_connect(struct sock *sk, struct sockaddr *addr, int addr_len); |
Hannes Frederic Sowa | 82b276c | 2014-01-20 05:16:39 +0100 | [diff] [blame] | 1095 | int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *addr, |
| 1096 | int addr_len); |
Martin KaFai Lau | 33c162a | 2016-04-11 15:29:36 -0700 | [diff] [blame] | 1097 | int ip6_datagram_dst_update(struct sock *sk, bool fix_sk_saddr); |
Martin KaFai Lau | e646b65 | 2016-04-11 15:29:37 -0700 | [diff] [blame] | 1098 | void ip6_datagram_release_cb(struct sock *sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1099 | |
Hannes Frederic Sowa | 85fbaa7 | 2013-11-23 00:46:12 +0100 | [diff] [blame] | 1100 | int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, |
| 1101 | int *addr_len); |
| 1102 | int ipv6_recv_rxpmtu(struct sock *sk, struct msghdr *msg, int len, |
| 1103 | int *addr_len); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1104 | void ipv6_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port, |
| 1105 | u32 info, u8 *payload); |
| 1106 | void ipv6_local_error(struct sock *sk, int err, struct flowi6 *fl6, u32 info); |
| 1107 | void ipv6_local_rxpmtu(struct sock *sk, struct flowi6 *fl6, u32 mtu); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1108 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1109 | int inet6_release(struct socket *sock); |
| 1110 | int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len); |
Denys Vlasenko | 9b2c45d | 2018-02-12 20:00:20 +0100 | [diff] [blame] | 1111 | int inet6_getname(struct socket *sock, struct sockaddr *uaddr, |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1112 | int peer); |
| 1113 | int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
Christoph Hellwig | 3986912 | 2020-05-18 08:28:06 +0200 | [diff] [blame] | 1114 | int inet6_compat_ioctl(struct socket *sock, unsigned int cmd, |
| 1115 | unsigned long arg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1116 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1117 | int inet6_hash_connect(struct inet_timewait_death_row *death_row, |
Arnaldo Carvalho de Melo | d8313f5 | 2005-12-13 23:25:44 -0800 | [diff] [blame] | 1118 | struct sock *sk); |
Florian Westphal | e42f1ac | 2020-01-24 16:04:02 -0800 | [diff] [blame] | 1119 | int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size); |
| 1120 | int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, |
| 1121 | int flags); |
Arnaldo Carvalho de Melo | d8313f5 | 2005-12-13 23:25:44 -0800 | [diff] [blame] | 1122 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1123 | /* |
| 1124 |  *	protocol operation tables (af_inet6.c, raw.c) |
| 1125 |  */ |
Eric Dumazet | 90ddc4f | 2005-12-22 12:49:22 -0800 | [diff] [blame] | 1126 | extern const struct proto_ops inet6_stream_ops; |
| 1127 | extern const struct proto_ops inet6_dgram_ops; |
Eric Dumazet | 77d4b1d | 2017-06-03 09:29:25 -0700 | [diff] [blame] | 1128 | extern const struct proto_ops inet6_sockraw_ops; |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1129 | |
Arnaldo Carvalho de Melo | 14c8502 | 2005-12-27 02:43:12 -0200 | [diff] [blame] | 1130 | struct group_source_req; |
| 1131 | struct group_filter; |
| 1132 | |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1133 | int ip6_mc_source(int add, int omode, struct sock *sk, |
| 1134 | struct group_source_req *pgsr); |
Al Viro | d59eb17 | 2020-03-30 15:43:10 -0400 | [diff] [blame] | 1135 | int ip6_mc_msfilter(struct sock *sk, struct group_filter *gsf, |
| 1136 | struct sockaddr_storage *list); |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1137 | int ip6_mc_msfget(struct sock *sk, struct group_filter *gsf, |
Al Viro | 931ca7a | 2020-03-29 17:18:30 -0400 | [diff] [blame] | 1138 | struct sockaddr_storage __user *p); |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1139 | |
| 1140 | #ifdef CONFIG_PROC_FS |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1141 | int ac6_proc_init(struct net *net); |
| 1142 | void ac6_proc_exit(struct net *net); |
| 1143 | int raw6_proc_init(void); |
| 1144 | void raw6_proc_exit(void); |
| 1145 | int tcp6_proc_init(struct net *net); |
| 1146 | void tcp6_proc_exit(struct net *net); |
| 1147 | int udp6_proc_init(struct net *net); |
| 1148 | void udp6_proc_exit(struct net *net); |
| 1149 | int udplite6_proc_init(void); |
| 1150 | void udplite6_proc_exit(void); |
| 1151 | int ipv6_misc_proc_init(void); |
| 1152 | void ipv6_misc_proc_exit(void); |
| 1153 | int snmp6_register_dev(struct inet6_dev *idev); |
| 1154 | int snmp6_unregister_dev(struct inet6_dev *idev); |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1155 | |
Herbert Xu | 7f7d9a6 | 2007-04-24 21:54:09 -0700 | [diff] [blame] | 1156 | #else |
Daniel Lezcano | 6ab57e7 | 2008-03-26 16:52:32 -0700 | [diff] [blame] | 1157 | static inline int ac6_proc_init(struct net *net) { return 0; } |
| 1158 | static inline void ac6_proc_exit(struct net *net) { } |
| 1159 | static inline int snmp6_register_dev(struct inet6_dev *idev) { return 0; } |
| 1160 | static inline int snmp6_unregister_dev(struct inet6_dev *idev) { return 0; } |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1161 | #endif |
| 1162 | |
| 1163 | #ifdef CONFIG_SYSCTL |
Joe Perches | 5c3a0fd | 2013-09-21 10:22:42 -0700 | [diff] [blame] | 1164 | struct ctl_table *ipv6_icmp_sysctl_init(struct net *net); |
| 1165 | struct ctl_table *ipv6_route_sysctl_init(struct net *net); |
| 1166 | int ipv6_sysctl_register(void); |
| 1167 | void ipv6_sysctl_unregister(void); |
Arnaldo Carvalho de Melo | 2038073 | 2005-08-16 02:18:02 -0300 | [diff] [blame] | 1168 | #endif |
| 1169 | |
Madhu Challa | 46a4dee | 2015-02-25 09:58:34 -0800 | [diff] [blame] | 1170 | int ipv6_sock_mc_join(struct sock *sk, int ifindex, |
| 1171 | const struct in6_addr *addr); |
Hangbin Liu | c7ea20c | 2018-07-10 22:41:27 +0800 | [diff] [blame] | 1172 | int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex, |
| 1173 | const struct in6_addr *addr, unsigned int mode); |
Madhu Challa | 46a4dee | 2015-02-25 09:58:34 -0800 | [diff] [blame] | 1174 | int ipv6_sock_mc_drop(struct sock *sk, int ifindex, |
| 1175 | const struct in6_addr *addr); |
Christoph Hellwig | 9b11574 | 2020-05-28 07:12:31 +0200 | [diff] [blame] | 1176 | |
| 1177 | static inline int ip6_sock_set_v6only(struct sock *sk) |
| 1178 | { |
| 1179 | if (inet_sk(sk)->inet_num) |
| 1180 | return -EINVAL; |
| 1181 | lock_sock(sk); |
| 1182 | sk->sk_ipv6only = true; |
| 1183 | release_sock(sk); |
| 1184 | return 0; |
| 1185 | } |
| 1186 | |
Christoph Hellwig | fce9349 | 2020-05-28 07:12:32 +0200 | [diff] [blame] | 1187 | static inline void ip6_sock_set_recverr(struct sock *sk) |
| 1188 | { |
| 1189 | lock_sock(sk); |
| 1190 | inet6_sk(sk)->recverr = true; |
| 1191 | release_sock(sk); |
| 1192 | } |
| 1193 | |
Christoph Hellwig | 18d5ad6 | 2020-05-28 07:12:33 +0200 | [diff] [blame] | 1194 | static inline int __ip6_sock_set_addr_preferences(struct sock *sk, int val) |
| 1195 | { |
| 1196 | unsigned int pref = 0; |
| 1197 | unsigned int prefmask = ~0; |
| 1198 | |
| 1199 | /* check PUBLIC/TMP/PUBTMP_DEFAULT conflicts */ |
| 1200 | switch (val & (IPV6_PREFER_SRC_PUBLIC | |
| 1201 | IPV6_PREFER_SRC_TMP | |
| 1202 | IPV6_PREFER_SRC_PUBTMP_DEFAULT)) { |
| 1203 | case IPV6_PREFER_SRC_PUBLIC: |
| 1204 | pref |= IPV6_PREFER_SRC_PUBLIC; |
| 1205 | prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | |
| 1206 | IPV6_PREFER_SRC_TMP); |
| 1207 | break; |
| 1208 | case IPV6_PREFER_SRC_TMP: |
| 1209 | pref |= IPV6_PREFER_SRC_TMP; |
| 1210 | prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | |
| 1211 | IPV6_PREFER_SRC_TMP); |
| 1212 | break; |
| 1213 | case IPV6_PREFER_SRC_PUBTMP_DEFAULT: |
| 1214 | prefmask &= ~(IPV6_PREFER_SRC_PUBLIC | |
| 1215 | IPV6_PREFER_SRC_TMP); |
| 1216 | break; |
| 1217 | case 0: |
| 1218 | break; |
| 1219 | default: |
| 1220 | return -EINVAL; |
| 1221 | } |
| 1222 | |
| 1223 | /* check HOME/COA conflicts */ |
| 1224 | switch (val & (IPV6_PREFER_SRC_HOME | IPV6_PREFER_SRC_COA)) { |
| 1225 | case IPV6_PREFER_SRC_HOME: |
| 1226 | prefmask &= ~IPV6_PREFER_SRC_COA; |
| 1227 | break; |
| 1228 | case IPV6_PREFER_SRC_COA: |
| 1229 | pref |= IPV6_PREFER_SRC_COA; |
| 1230 | break; |
| 1231 | case 0: |
| 1232 | break; |
| 1233 | default: |
| 1234 | return -EINVAL; |
| 1235 | } |
| 1236 | |
| 1237 | /* check CGA/NONCGA conflicts */ |
| 1238 | switch (val & (IPV6_PREFER_SRC_CGA|IPV6_PREFER_SRC_NONCGA)) { |
| 1239 | case IPV6_PREFER_SRC_CGA: |
| 1240 | case IPV6_PREFER_SRC_NONCGA: |
| 1241 | case 0: |
| 1242 | break; |
| 1243 | default: |
| 1244 | return -EINVAL; |
| 1245 | } |
| 1246 | |
| 1247 | inet6_sk(sk)->srcprefs = (inet6_sk(sk)->srcprefs & prefmask) | pref; |
| 1248 | return 0; |
| 1249 | } |
| 1250 | |
| 1251 | static inline int ip6_sock_set_addr_preferences(struct sock *sk, int val) |
| 1252 | { |
| 1253 | int ret; |
| 1254 | |
| 1255 | lock_sock(sk); |
| 1256 | ret = __ip6_sock_set_addr_preferences(sk, val); |
| 1257 | release_sock(sk); |
| 1258 | return ret; |
| 1259 | } |
| 1260 | |
Christoph Hellwig | 7d7207c | 2020-05-28 07:12:34 +0200 | [diff] [blame] | 1261 | static inline void ip6_sock_set_recvpktinfo(struct sock *sk) |
| 1262 | { |
| 1263 | lock_sock(sk); |
| 1264 | inet6_sk(sk)->rxopt.bits.rxinfo = true; |
| 1265 | release_sock(sk); |
| 1266 | } |
| 1267 | |
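/*
 * Hedged usage note (not part of this header): the ip6_sock_set_*() helpers
 * above let in-kernel users flip common IPv6 socket options without going
 * through ipv6_setsockopt(). For a freshly created kernel socket this might
 * look like the sequence below; sock_create_kern() is the usual way such a
 * socket is obtained, and the error handling is simplified for illustration:
 *
 *	struct socket *sock;
 *	int err;
 *
 *	err = sock_create_kern(net, AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
 *	if (err)
 *		return err;
 *	err = ip6_sock_set_v6only(sock->sk);	/* fails once a port is bound */
 *	if (!err)
 *		err = ip6_sock_set_addr_preferences(sock->sk,
 *						    IPV6_PREFER_SRC_PUBLIC);
 *	ip6_sock_set_recverr(sock->sk);
 */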
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1268 | #endif /* _NET_IPV6_H */ |