/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NET_IP6_ROUTE_H
#define _NET_IP6_ROUTE_H

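/* Route Information Option (RFC 4191), as carried in Router Advertisements. */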
struct route_info {
	__u8			type;
	__u8			length;
	__u8			prefix_len;
#if defined(__BIG_ENDIAN_BITFIELD)
	__u8			reserved_h:3,
				route_pref:2,
				reserved_l:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	__u8			reserved_l:3,
				route_pref:2,
				reserved_h:3;
#endif
	__be32			lifetime;
	__u8			prefix[];	/* 0, 8 or 16 bytes */
};

#include <net/addrconf.h>
#include <net/flow.h>
#include <net/ip6_fib.h>
#include <net/sock.h>
#include <net/lwtunnel.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/route.h>
#include <net/nexthop.h>

#define RT6_LOOKUP_F_IFACE		0x00000001
#define RT6_LOOKUP_F_REACHABLE		0x00000002
#define RT6_LOOKUP_F_HAS_SADDR		0x00000004
#define RT6_LOOKUP_F_SRCPREF_TMP	0x00000008
#define RT6_LOOKUP_F_SRCPREF_PUBLIC	0x00000010
#define RT6_LOOKUP_F_SRCPREF_COA	0x00000020
#define RT6_LOOKUP_F_IGNORE_LINKSTATE	0x00000040
#define RT6_LOOKUP_F_DST_NOREF		0x00000080

/* We do not (yet ?) support IPv6 jumbograms (RFC 2675)
 * Unlike IPv4, hdr->payload_len doesn't include the IPv6 header
 */
#define IP6_MAX_MTU (0xFFFF + sizeof(struct ipv6hdr))
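/* 0xFFFF is the largest payload_len, so IP6_MAX_MTU works out to
 * 65535 + 40 = 65575 bytes.
 */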

/*
 * rt6_srcprefs2flags() and rt6_flags2srcprefs() translate
 * between the IPV6_ADDR_PREFERENCES socket option values
 *	IPV6_PREFER_SRC_TMP    = 0x1
 *	IPV6_PREFER_SRC_PUBLIC = 0x2
 *	IPV6_PREFER_SRC_COA    = 0x4
 * and the RT6_LOOKUP_F_SRCPREF_xxx flags above.
 */
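/* For example, IPV6_PREFER_SRC_TMP (0x1) shifted left by three bits is
 * RT6_LOOKUP_F_SRCPREF_TMP (0x8); the other two preferences map the same way.
 */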
static inline int rt6_srcprefs2flags(unsigned int srcprefs)
{
	/* No need to bitmask because srcprefs have only 3 bits. */
	return srcprefs << 3;
}

static inline unsigned int rt6_flags2srcprefs(int flags)
{
	return (flags >> 3) & 7;
}

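/* Destinations that must be scoped to a specific interface: multicast,
 * link-local and loopback addresses.
 */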
static inline bool rt6_need_strict(const struct in6_addr *daddr)
{
	return ipv6_addr_type(daddr) &
		(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}

/* fib entries using a nexthop object cannot be coalesced into
 * a multipath route
 */
static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
{
	/* the RTF_ADDRCONF flag filters out routes learned from RAs */
	return !(f6i->fib6_flags & RTF_ADDRCONF) && !f6i->nh &&
	       f6i->fib6_nh->fib_nh_gw_family;
}

void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb, int flags);

struct dst_entry *ip6_route_output_flags_noref(struct net *net,
					       const struct sock *sk,
					       struct flowi6 *fl6, int flags);

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags);

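/* Same as ip6_route_output_flags() with no lookup flags set. */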
static inline struct dst_entry *ip6_route_output(struct net *net,
						 const struct sock *sk,
						 struct flowi6 *fl6)
{
	return ip6_route_output_flags(net, sk, fl6, 0);
}

/* Only release the dst if RT6_LOOKUP_F_DST_NOREF is not set in flags,
 * or if the dst is on the uncached list.
 */
static inline void ip6_rt_put_flags(struct rt6_info *rt, int flags)
{
	if (!(flags & RT6_LOOKUP_F_DST_NOREF) ||
	    !list_empty(&rt->rt6i_uncached))
		ip6_rt_put(rt);
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags);
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int ifindex, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags);

void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);

int ipv6_route_ioctl(struct net *net, unsigned int cmd,
		     struct in6_rtmsg *rtmsg);

int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
		  struct netlink_ext_ack *extack);
int ip6_ins_rt(struct net *net, struct fib6_info *f6i);
int ip6_del_rt(struct net *net, struct fib6_info *f6i, bool skip_notify);

void rt6_flush_exceptions(struct fib6_info *f6i);
void rt6_age_exceptions(struct fib6_info *f6i, struct fib6_gc_args *gc_args,
			unsigned long now);

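/* Use the route's preferred source address when one is set, otherwise fall
 * back to source address selection on the route's nexthop device.
 */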
static inline int ip6_route_get_saddr(struct net *net, struct fib6_info *f6i,
				      const struct in6_addr *daddr,
				      unsigned int prefs,
				      struct in6_addr *saddr)
{
	int err = 0;

	if (f6i && f6i->fib6_prefsrc.plen) {
		*saddr = f6i->fib6_prefsrc.addr;
	} else {
		struct net_device *dev = f6i ? fib6_info_nh_dev(f6i) : NULL;

		err = ipv6_dev_get_saddr(net, dev, daddr, prefs, saddr);
	}

	return err;
}

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int flags);
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *hkeys);

struct dst_entry *icmp6_dst_alloc(struct net_device *dev, struct flowi6 *fl6);

void fib6_force_start_gc(struct net *net);

struct fib6_info *addrconf_f6i_alloc(struct net *net, struct inet6_dev *idev,
				     const struct in6_addr *addr, bool anycast,
				     gfp_t gfp_flags);

struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags);

/*
 *	support functions for ND
 *
 */
struct fib6_info *rt6_get_dflt_router(struct net *net,
				      const struct in6_addr *addr,
				      struct net_device *dev);
struct fib6_info *rt6_add_dflt_router(struct net *net,
				      const struct in6_addr *gwaddr,
				      struct net_device *dev, unsigned int pref,
				      u32 defrtr_usr_metric);

void rt6_purge_dflt_routers(struct net *net);

int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr);

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu, int oif,
		     u32 mark, kuid_t uid);
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu);
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid);
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif);
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk);

struct netlink_callback;

struct rt6_rtnl_dump_arg {
	struct sk_buff *skb;
	struct netlink_callback *cb;
	struct net *net;
	struct fib_dump_filter filter;
};

int rt6_dump_route(struct fib6_info *f6i, void *p_arg, unsigned int skip);
void rt6_mtu_change(struct net_device *dev, unsigned int mtu);
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp);
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway);
void rt6_sync_up(struct net_device *dev, unsigned char nh_flags);
void rt6_disable_ip(struct net_device *dev, unsigned long event);
void rt6_sync_down_dev(struct net_device *dev, unsigned long event);
void rt6_multipath_rebalance(struct fib6_info *f6i);

void rt6_uncached_list_add(struct rt6_info *rt);
void rt6_uncached_list_del(struct rt6_info *rt);

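/* rt6_info attached to the skb's dst entry, or NULL if there is none. */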
static inline const struct rt6_info *skb_rt6_info(const struct sk_buff *skb)
{
	const struct dst_entry *dst = skb_dst(skb);
	const struct rt6_info *rt6 = NULL;

	if (dst)
		rt6 = container_of(dst, struct rt6_info, dst);

	return rt6;
}

/*
 *	Store a destination cache entry in a socket
 */
static inline void ip6_dst_store(struct sock *sk, struct dst_entry *dst,
				 const struct in6_addr *daddr,
				 const struct in6_addr *saddr)
{
	struct ipv6_pinfo *np = inet6_sk(sk);

	np->dst_cookie = rt6_get_cookie((struct rt6_info *)dst);
	sk_setup_caps(sk, dst);
	np->daddr_cache = daddr;
#ifdef CONFIG_IPV6_SUBTREES
	np->saddr_cache = saddr;
#endif
}

void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6);

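/* True when the skb's route points at one of this host's own addresses
 * (the route carries RTF_LOCAL).
 */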
static inline bool ipv6_unicast_destination(const struct sk_buff *skb)
{
	struct rt6_info *rt = (struct rt6_info *) skb_dst(skb);

	return rt->rt6i_flags & RTF_LOCAL;
}

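/* True if @daddr is an anycast destination on @dst: either the route is
 * flagged RTF_ANYCAST, or @daddr equals the prefix address of an on-link
 * (non-gateway) route, e.g. a subnet-router anycast address.
 */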
static inline bool ipv6_anycast_destination(const struct dst_entry *dst,
					    const struct in6_addr *daddr)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	return rt->rt6i_flags & RTF_ANYCAST ||
		(rt->rt6i_dst.plen < 127 &&
		 !(rt->rt6i_flags & (RTF_GATEWAY | RTF_NONEXTHOP)) &&
		 ipv6_addr_equal(&rt->rt6i_dst.addr, daddr));
}

int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct net *, struct sock *, struct sk_buff *));

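/* MTU of the skb's dst. Sockets probing PMTU (IPV6_PMTUDISC_PROBE) use the
 * outgoing device MTU less any lightweight tunnel headroom instead of the
 * cached path MTU.
 */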
static inline unsigned int ip6_skb_dst_mtu(struct sk_buff *skb)
{
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	int mtu;

	if (np && np->pmtudisc >= IPV6_PMTUDISC_PROBE) {
		mtu = READ_ONCE(skb_dst(skb)->dev->mtu);
		mtu -= lwtunnel_headroom(skb_dst(skb)->lwtstate, mtu);
	} else {
		mtu = dst_mtu(skb_dst(skb));
	}

	return mtu;
}

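/* True unless the socket has opted out of PMTU updates with
 * IPV6_PMTUDISC_INTERFACE or IPV6_PMTUDISC_OMIT.
 */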
static inline bool ip6_sk_accept_pmtu(const struct sock *sk)
{
	return inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_INTERFACE &&
	       inet6_sk(sk)->pmtudisc != IPV6_PMTUDISC_OMIT;
}

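/* True when the socket permits local fragmentation, i.e. its pmtudisc mode
 * is below IPV6_PMTUDISC_DO or is IPV6_PMTUDISC_OMIT.
 */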
static inline bool ip6_sk_ignore_df(const struct sock *sk)
{
	return inet6_sk(sk)->pmtudisc < IPV6_PMTUDISC_DO ||
	       inet6_sk(sk)->pmtudisc == IPV6_PMTUDISC_OMIT;
}

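/* Address to resolve as the next hop: the gateway for gatewayed routes, the
 * cached destination for RTF_CACHE clones, otherwise the packet's own
 * destination address.
 */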
static inline const struct in6_addr *rt6_nexthop(const struct rt6_info *rt,
						 const struct in6_addr *daddr)
{
	if (rt->rt6i_flags & RTF_GATEWAY)
		return &rt->rt6i_gateway;
	else if (unlikely(rt->rt6i_flags & RTF_CACHE))
		return &rt->rt6i_dst.addr;
	else
		return daddr;
}

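/* Two fib entries share a nexthop when they reference the same nexthop
 * object, or when device, gateway and lightweight tunnel encap all match.
 */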
static inline bool rt6_duplicate_nexthop(struct fib6_info *a, struct fib6_info *b)
{
	struct fib6_nh *nha, *nhb;

	if (a->nh || b->nh)
		return nexthop_cmp(a->nh, b->nh);

	nha = a->fib6_nh;
	nhb = b->fib6_nh;
	return nha->fib_nh_dev == nhb->fib_nh_dev &&
	       ipv6_addr_equal(&nha->fib_nh_gw6, &nhb->fib_nh_gw6) &&
	       !lwtunnel_cmp_encap(nha->fib_nh_lws, nhb->fib_nh_lws);
}

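/* MTU to use when forwarding via @dst: a locked RTAX_MTU metric when set,
 * otherwise the interface's mtu6 (falling back to IPV6_MIN_MTU), in both
 * cases less any lightweight tunnel headroom.
 */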
static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	if (dst_metric_locked(dst, RTAX_MTU)) {
		mtu = dst_metric_raw(dst, RTAX_MTU);
		if (mtu)
			goto out;
	}

	mtu = IPV6_MIN_MTU;
	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr);

struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
				   struct net_device *dev, struct sk_buff *skb,
				   const void *daddr);
#endif