blob: aade636c6be64eb7b6e4ca25f6333dead7f9f6a9 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Linux INET6 implementation
4 * FIB front-end.
5 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 */
9
10/* Changes:
11 *
12 * YOSHIFUJI Hideaki @USAGI
13 * reworked default router selection.
14 * - respect outgoing interface
15 * - select from (probably) reachable routers (i.e.
16 * routers in REACHABLE, STALE, DELAY or PROBE states).
17 * - always select the same router if it is (probably)
18 * reachable. otherwise, round-robin the list.
YOSHIFUJI Hideakic0bece92006-08-23 17:23:25 -070019 * Ville Nuorvala
20 * Fixed routing subtrees.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 */
22
Joe Perchesf3213832012-05-15 14:11:53 +000023#define pr_fmt(fmt) "IPv6: " fmt
24
Randy Dunlap4fc268d2006-01-11 12:17:47 -080025#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/errno.h>
Paul Gortmakerbc3b2d72011-07-15 11:47:34 -040027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/types.h>
29#include <linux/times.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/route.h>
34#include <linux/netdevice.h>
35#include <linux/in6.h>
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +090036#include <linux/mroute6.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/if_arp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/proc_fs.h>
40#include <linux/seq_file.h>
Daniel Lezcano5b7c9312008-03-03 23:28:58 -080041#include <linux/nsproxy.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090042#include <linux/slab.h>
Wei Wang35732d02017-10-06 12:05:57 -070043#include <linux/jhash.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020044#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#include <net/snmp.h>
46#include <net/ipv6.h>
47#include <net/ip6_fib.h>
48#include <net/ip6_route.h>
49#include <net/ndisc.h>
50#include <net/addrconf.h>
51#include <net/tcp.h>
52#include <linux/rtnetlink.h>
53#include <net/dst.h>
Jiri Benc904af042015-08-20 13:56:31 +020054#include <net/dst_metadata.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#include <net/xfrm.h>
Tom Tucker8d717402006-07-30 20:43:36 -070056#include <net/netevent.h>
Thomas Graf21713eb2006-08-15 00:35:24 -070057#include <net/netlink.h>
David Ahern3c618c12019-04-20 09:28:20 -070058#include <net/rtnh.h>
Roopa Prabhu19e42e42015-07-21 10:43:48 +020059#include <net/lwtunnel.h>
Jiri Benc904af042015-08-20 13:56:31 +020060#include <net/ip_tunnels.h>
David Ahernca254492015-10-12 11:47:10 -070061#include <net/l3mdev.h>
Roopa Prabhueacb9382018-05-22 14:03:28 -070062#include <net/ip.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080063#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
65#ifdef CONFIG_SYSCTL
66#include <linux/sysctl.h>
67#endif
68
David Ahern30d444d2018-05-23 17:08:48 -070069static int ip6_rt_type_to_error(u8 fib6_type);
70
71#define CREATE_TRACE_POINTS
72#include <trace/events/fib6.h>
73EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
74#undef CREATE_TRACE_POINTS
75
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +020076enum rt6_nud_state {
Jiri Benc7e980562013-12-11 13:48:20 +010077 RT6_NUD_FAIL_HARD = -3,
78 RT6_NUD_FAIL_PROBE = -2,
79 RT6_NUD_FAIL_DO_RR = -1,
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +020080 RT6_NUD_SUCCEED = 1
81};
82
Linus Torvalds1da177e2005-04-16 15:20:36 -070083static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
David S. Miller0dbaee32010-12-13 12:52:14 -080084static unsigned int ip6_default_advmss(const struct dst_entry *dst);
Steffen Klassertebb762f2011-11-23 02:12:51 +000085static unsigned int ip6_mtu(const struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87static void ip6_dst_destroy(struct dst_entry *);
88static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
Daniel Lezcano569d3642008-01-18 03:56:57 -080090static int ip6_dst_gc(struct dst_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091
92static int ip6_pkt_discard(struct sk_buff *skb);
Eric W. Biedermanede20592015-10-07 16:48:47 -050093static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
Kamala R7150aed2013-12-02 19:55:21 +053094static int ip6_pkt_prohibit(struct sk_buff *skb);
Eric W. Biedermanede20592015-10-07 16:48:47 -050095static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096static void ip6_link_failure(struct sk_buff *skb);
David S. Miller6700c272012-07-17 03:29:28 -070097static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb, u32 mtu);
99static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
100 struct sk_buff *skb);
David Ahern702cea52019-04-09 14:41:13 -0700101static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
102 int strict);
David Ahern8d1c8022018-04-17 17:33:26 -0700103static size_t rt6_nlmsg_size(struct fib6_info *rt);
David Ahernd4ead6b2018-04-17 17:33:16 -0700104static int rt6_fill_node(struct net *net, struct sk_buff *skb,
David Ahern8d1c8022018-04-17 17:33:26 -0700105 struct fib6_info *rt, struct dst_entry *dst,
David Ahernd4ead6b2018-04-17 17:33:16 -0700106 struct in6_addr *dest, struct in6_addr *src,
David Ahern16a16cd2017-02-02 12:37:11 -0800107 int iif, int type, u32 portid, u32 seq,
108 unsigned int flags);
David Ahern7e4b5122019-04-16 14:36:00 -0700109static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
Wei Wang510e2ce2019-05-16 13:30:54 -0700110 const struct in6_addr *daddr,
111 const struct in6_addr *saddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800113#ifdef CONFIG_IPV6_ROUTE_INFO
David Ahern8d1c8022018-04-17 17:33:26 -0700114static struct fib6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000115 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -0700116 const struct in6_addr *gwaddr,
117 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +0000118 unsigned int pref);
David Ahern8d1c8022018-04-17 17:33:26 -0700119static struct fib6_info *rt6_get_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000120 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -0700121 const struct in6_addr *gwaddr,
122 struct net_device *dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800123#endif
124
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700125struct uncached_list {
126 spinlock_t lock;
127 struct list_head head;
128};
129
130static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
131
Xin Long510c3212018-02-14 19:06:02 +0800132void rt6_uncached_list_add(struct rt6_info *rt)
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700133{
134 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
135
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700136 rt->rt6i_uncached_list = ul;
137
138 spin_lock_bh(&ul->lock);
139 list_add_tail(&rt->rt6i_uncached, &ul->head);
140 spin_unlock_bh(&ul->lock);
141}
142
Xin Long510c3212018-02-14 19:06:02 +0800143void rt6_uncached_list_del(struct rt6_info *rt)
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700144{
145 if (!list_empty(&rt->rt6i_uncached)) {
146 struct uncached_list *ul = rt->rt6i_uncached_list;
Wei Wang81eb8442017-10-06 12:06:11 -0700147 struct net *net = dev_net(rt->dst.dev);
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700148
149 spin_lock_bh(&ul->lock);
150 list_del(&rt->rt6i_uncached);
Wei Wang81eb8442017-10-06 12:06:11 -0700151 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700152 spin_unlock_bh(&ul->lock);
153 }
154}
155
156static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
157{
158 struct net_device *loopback_dev = net->loopback_dev;
159 int cpu;
160
Eric W. Biedermane332bc62015-10-12 11:02:08 -0500161 if (dev == loopback_dev)
162 return;
163
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700164 for_each_possible_cpu(cpu) {
165 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
166 struct rt6_info *rt;
167
168 spin_lock_bh(&ul->lock);
169 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
170 struct inet6_dev *rt_idev = rt->rt6i_idev;
171 struct net_device *rt_dev = rt->dst.dev;
172
Eric W. Biedermane332bc62015-10-12 11:02:08 -0500173 if (rt_idev->dev == dev) {
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700174 rt->rt6i_idev = in6_dev_get(loopback_dev);
175 in6_dev_put(rt_idev);
176 }
177
Eric W. Biedermane332bc62015-10-12 11:02:08 -0500178 if (rt_dev == dev) {
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700179 rt->dst.dev = loopback_dev;
180 dev_hold(rt->dst.dev);
181 dev_put(rt_dev);
182 }
183 }
184 spin_unlock_bh(&ul->lock);
185 }
186}
187
David Ahernf8a1b432018-04-17 17:33:21 -0700188static inline const void *choose_neigh_daddr(const struct in6_addr *p,
David S. Millerf894cbf2012-07-02 21:52:24 -0700189 struct sk_buff *skb,
190 const void *daddr)
David S. Miller39232972012-01-26 15:22:32 -0500191{
David S. Millera7563f32012-01-26 16:29:16 -0500192 if (!ipv6_addr_any(p))
David S. Miller39232972012-01-26 15:22:32 -0500193 return (const void *) p;
David S. Millerf894cbf2012-07-02 21:52:24 -0700194 else if (skb)
195 return &ipv6_hdr(skb)->daddr;
David S. Miller39232972012-01-26 15:22:32 -0500196 return daddr;
197}
198
David Ahernf8a1b432018-04-17 17:33:21 -0700199struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
200 struct net_device *dev,
201 struct sk_buff *skb,
202 const void *daddr)
David S. Millerd3aaeb32011-07-18 00:40:17 -0700203{
David S. Miller39232972012-01-26 15:22:32 -0500204 struct neighbour *n;
205
David Ahernf8a1b432018-04-17 17:33:21 -0700206 daddr = choose_neigh_daddr(gw, skb, daddr);
207 n = __ipv6_neigh_lookup(dev, daddr);
David S. Millerf83c7792011-12-28 15:41:23 -0500208 if (n)
209 return n;
Stefano Brivio7adf3242019-01-02 13:29:27 +0100210
211 n = neigh_create(&nd_tbl, daddr, dev);
212 return IS_ERR(n) ? NULL : n;
David Ahernf8a1b432018-04-17 17:33:21 -0700213}
214
215static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
216 struct sk_buff *skb,
217 const void *daddr)
218{
219 const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);
220
221 return ip6_neigh_lookup(&rt->rt6i_gateway, dst->dev, skb, daddr);
David S. Millerf83c7792011-12-28 15:41:23 -0500222}
223
Julian Anastasov63fca652017-02-06 23:14:15 +0200224static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
225{
226 struct net_device *dev = dst->dev;
227 struct rt6_info *rt = (struct rt6_info *)dst;
228
David Ahernf8a1b432018-04-17 17:33:21 -0700229 daddr = choose_neigh_daddr(&rt->rt6i_gateway, NULL, daddr);
Julian Anastasov63fca652017-02-06 23:14:15 +0200230 if (!daddr)
231 return;
232 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
233 return;
234 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
235 return;
236 __ipv6_confirm_neigh(dev, daddr);
237}
238
Daniel Lezcano9a7ec3a2008-03-04 13:48:53 -0800239static struct dst_ops ip6_dst_ops_template = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700240 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241 .gc = ip6_dst_gc,
242 .gc_thresh = 1024,
243 .check = ip6_dst_check,
David S. Miller0dbaee32010-12-13 12:52:14 -0800244 .default_advmss = ip6_default_advmss,
Steffen Klassertebb762f2011-11-23 02:12:51 +0000245 .mtu = ip6_mtu,
David Ahernd4ead6b2018-04-17 17:33:16 -0700246 .cow_metrics = dst_cow_metrics_generic,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700247 .destroy = ip6_dst_destroy,
248 .ifdown = ip6_dst_ifdown,
249 .negative_advice = ip6_negative_advice,
250 .link_failure = ip6_link_failure,
251 .update_pmtu = ip6_rt_update_pmtu,
David S. Miller6e157b62012-07-12 00:05:02 -0700252 .redirect = rt6_do_redirect,
Eric W. Biederman9f8955c2015-10-07 16:48:39 -0500253 .local_out = __ip6_local_out,
David Ahernf8a1b432018-04-17 17:33:21 -0700254 .neigh_lookup = ip6_dst_neigh_lookup,
Julian Anastasov63fca652017-02-06 23:14:15 +0200255 .confirm_neigh = ip6_confirm_neigh,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256};
257
Steffen Klassertebb762f2011-11-23 02:12:51 +0000258static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
Roland Dreierec831ea2011-01-31 13:16:00 -0800259{
Steffen Klassert618f9bc2011-11-23 02:13:31 +0000260 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
261
262 return mtu ? : dst->dev->mtu;
Roland Dreierec831ea2011-01-31 13:16:00 -0800263}
264
David S. Miller6700c272012-07-17 03:29:28 -0700265static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
266 struct sk_buff *skb, u32 mtu)
David S. Miller14e50e52007-05-24 18:17:54 -0700267{
268}
269
David S. Miller6700c272012-07-17 03:29:28 -0700270static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
271 struct sk_buff *skb)
David S. Millerb587ee32012-07-12 00:39:24 -0700272{
273}
274
David S. Miller14e50e52007-05-24 18:17:54 -0700275static struct dst_ops ip6_dst_blackhole_ops = {
276 .family = AF_INET6,
David S. Miller14e50e52007-05-24 18:17:54 -0700277 .destroy = ip6_dst_destroy,
278 .check = ip6_dst_check,
Steffen Klassertebb762f2011-11-23 02:12:51 +0000279 .mtu = ip6_blackhole_mtu,
Eric Dumazet214f45c2011-02-18 11:39:01 -0800280 .default_advmss = ip6_default_advmss,
David S. Miller14e50e52007-05-24 18:17:54 -0700281 .update_pmtu = ip6_rt_blackhole_update_pmtu,
David S. Millerb587ee32012-07-12 00:39:24 -0700282 .redirect = ip6_rt_blackhole_redirect,
Martin KaFai Lau0a1f5962015-10-15 16:39:58 -0700283 .cow_metrics = dst_cow_metrics_generic,
David Ahernf8a1b432018-04-17 17:33:21 -0700284 .neigh_lookup = ip6_dst_neigh_lookup,
David S. Miller14e50e52007-05-24 18:17:54 -0700285};
286
David S. Miller62fa8a82011-01-26 20:51:05 -0800287static const u32 ip6_template_metrics[RTAX_MAX] = {
Li RongQing14edd872012-10-24 14:01:18 +0800288 [RTAX_HOPLIMIT - 1] = 0,
David S. Miller62fa8a82011-01-26 20:51:05 -0800289};
290
David Ahern8d1c8022018-04-17 17:33:26 -0700291static const struct fib6_info fib6_null_entry_template = {
David Ahern93c2fb22018-04-18 15:38:59 -0700292 .fib6_flags = (RTF_REJECT | RTF_NONEXTHOP),
293 .fib6_protocol = RTPROT_KERNEL,
294 .fib6_metric = ~(u32)0,
Eric Dumazetf05713e2019-04-22 18:35:03 -0700295 .fib6_ref = REFCOUNT_INIT(1),
David Ahern421842e2018-04-17 17:33:18 -0700296 .fib6_type = RTN_UNREACHABLE,
297 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
298};
299
Eric Dumazetfb0af4c2012-09-11 21:47:51 +0000300static const struct rt6_info ip6_null_entry_template = {
Changli Gaod8d1f302010-06-10 23:31:35 -0700301 .dst = {
302 .__refcnt = ATOMIC_INIT(1),
303 .__use = 1,
Nicolas Dichtel2c20cbd2012-09-10 22:09:47 +0000304 .obsolete = DST_OBSOLETE_FORCE_CHK,
Changli Gaod8d1f302010-06-10 23:31:35 -0700305 .error = -ENETUNREACH,
Changli Gaod8d1f302010-06-10 23:31:35 -0700306 .input = ip6_pkt_discard,
307 .output = ip6_pkt_discard_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700308 },
309 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310};
311
Thomas Graf101367c2006-08-04 03:39:02 -0700312#ifdef CONFIG_IPV6_MULTIPLE_TABLES
313
Eric Dumazetfb0af4c2012-09-11 21:47:51 +0000314static const struct rt6_info ip6_prohibit_entry_template = {
Changli Gaod8d1f302010-06-10 23:31:35 -0700315 .dst = {
316 .__refcnt = ATOMIC_INIT(1),
317 .__use = 1,
Nicolas Dichtel2c20cbd2012-09-10 22:09:47 +0000318 .obsolete = DST_OBSOLETE_FORCE_CHK,
Changli Gaod8d1f302010-06-10 23:31:35 -0700319 .error = -EACCES,
Changli Gaod8d1f302010-06-10 23:31:35 -0700320 .input = ip6_pkt_prohibit,
321 .output = ip6_pkt_prohibit_out,
Thomas Graf101367c2006-08-04 03:39:02 -0700322 },
323 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
Thomas Graf101367c2006-08-04 03:39:02 -0700324};
325
Eric Dumazetfb0af4c2012-09-11 21:47:51 +0000326static const struct rt6_info ip6_blk_hole_entry_template = {
Changli Gaod8d1f302010-06-10 23:31:35 -0700327 .dst = {
328 .__refcnt = ATOMIC_INIT(1),
329 .__use = 1,
Nicolas Dichtel2c20cbd2012-09-10 22:09:47 +0000330 .obsolete = DST_OBSOLETE_FORCE_CHK,
Changli Gaod8d1f302010-06-10 23:31:35 -0700331 .error = -EINVAL,
Changli Gaod8d1f302010-06-10 23:31:35 -0700332 .input = dst_discard,
Eric W. Biedermanede20592015-10-07 16:48:47 -0500333 .output = dst_discard_out,
Thomas Graf101367c2006-08-04 03:39:02 -0700334 },
335 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
Thomas Graf101367c2006-08-04 03:39:02 -0700336};
337
338#endif
339
Martin KaFai Lauebfa45f2015-10-15 16:39:57 -0700340static void rt6_info_init(struct rt6_info *rt)
341{
342 struct dst_entry *dst = &rt->dst;
343
344 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
Martin KaFai Lauebfa45f2015-10-15 16:39:57 -0700345 INIT_LIST_HEAD(&rt->rt6i_uncached);
346}
347
Linus Torvalds1da177e2005-04-16 15:20:36 -0700348/* allocate dst with ip6_dst_ops */
David Ahern93531c62018-04-17 17:33:25 -0700349struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
350 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351{
David S. Miller97bab732012-06-09 22:36:36 -0700352 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
Wei Wangb2a9c0e2017-06-17 10:42:41 -0700353 1, DST_OBSOLETE_FORCE_CHK, flags);
David S. Millercf911662011-04-28 14:31:47 -0700354
Wei Wang81eb8442017-10-06 12:06:11 -0700355 if (rt) {
Martin KaFai Lauebfa45f2015-10-15 16:39:57 -0700356 rt6_info_init(rt);
Wei Wang81eb8442017-10-06 12:06:11 -0700357 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
358 }
Steffen Klassert81048912012-07-05 23:37:09 +0000359
David S. Millercf911662011-04-28 14:31:47 -0700360 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361}
David Ahern9ab179d2016-04-07 11:10:06 -0700362EXPORT_SYMBOL(ip6_dst_alloc);
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700363
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364static void ip6_dst_destroy(struct dst_entry *dst)
365{
366 struct rt6_info *rt = (struct rt6_info *)dst;
David Aherna68886a2018-04-20 15:38:02 -0700367 struct fib6_info *from;
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700368 struct inet6_dev *idev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700369
David Ahern1620a332018-10-04 20:07:54 -0700370 ip_dst_metrics_put(dst);
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700371 rt6_uncached_list_del(rt);
372
373 idev = rt->rt6i_idev;
David S. Miller38308472011-12-03 18:02:47 -0500374 if (idev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700375 rt->rt6i_idev = NULL;
376 in6_dev_put(idev);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900377 }
Gao feng1716a962012-04-06 00:13:10 +0000378
Eric Dumazet0e233872019-04-28 12:22:25 -0700379 from = xchg((__force struct fib6_info **)&rt->from, NULL);
David Ahern93531c62018-04-17 17:33:25 -0700380 fib6_info_release(from);
David S. Millerb3419362010-11-30 12:27:11 -0800381}
382
Linus Torvalds1da177e2005-04-16 15:20:36 -0700383static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
384 int how)
385{
386 struct rt6_info *rt = (struct rt6_info *)dst;
387 struct inet6_dev *idev = rt->rt6i_idev;
Denis V. Lunev5a3e55d2007-12-07 00:38:10 -0800388 struct net_device *loopback_dev =
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900389 dev_net(dev)->loopback_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390
Wei Wange5645f52017-08-14 10:44:59 -0700391 if (idev && idev->dev != loopback_dev) {
392 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
393 if (loopback_idev) {
394 rt->rt6i_idev = loopback_idev;
395 in6_dev_put(idev);
David S. Miller97cac082012-07-02 22:43:47 -0700396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397 }
398}
399
Martin KaFai Lau5973fb12015-11-11 11:51:07 -0800400static bool __rt6_check_expired(const struct rt6_info *rt)
401{
402 if (rt->rt6i_flags & RTF_EXPIRES)
403 return time_after(jiffies, rt->dst.expires);
404 else
405 return false;
406}
407
Eric Dumazeta50feda2012-05-18 18:57:34 +0000408static bool rt6_check_expired(const struct rt6_info *rt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409{
David Aherna68886a2018-04-20 15:38:02 -0700410 struct fib6_info *from;
411
412 from = rcu_dereference(rt->from);
413
Gao feng1716a962012-04-06 00:13:10 +0000414 if (rt->rt6i_flags & RTF_EXPIRES) {
415 if (time_after(jiffies, rt->dst.expires))
Eric Dumazeta50feda2012-05-18 18:57:34 +0000416 return true;
David Aherna68886a2018-04-20 15:38:02 -0700417 } else if (from) {
Xin Long1e2ea8a2017-08-26 20:10:10 +0800418 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
David Aherna68886a2018-04-20 15:38:02 -0700419 fib6_check_expired(from);
Gao feng1716a962012-04-06 00:13:10 +0000420 }
Eric Dumazeta50feda2012-05-18 18:57:34 +0000421 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422}
423
David Ahernb1d40992019-04-16 14:35:59 -0700424void fib6_select_path(const struct net *net, struct fib6_result *res,
425 struct flowi6 *fl6, int oif, bool have_oif_match,
426 const struct sk_buff *skb, int strict)
Nicolas Dichtel51ebd312012-10-22 03:42:09 +0000427{
David Ahern8d1c8022018-04-17 17:33:26 -0700428 struct fib6_info *sibling, *next_sibling;
David Ahernb1d40992019-04-16 14:35:59 -0700429 struct fib6_info *match = res->f6i;
430
431 if (!match->fib6_nsiblings || have_oif_match)
432 goto out;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +0000433
Jakub Sitnickib673d6c2017-08-23 09:58:31 +0200434 /* We might have already computed the hash for ICMPv6 errors. In such
435 * case it will always be non-zero. Otherwise now is the time to do it.
436 */
437 if (!fl6->mp_hash)
David Ahernb4bac172018-03-02 08:32:18 -0800438 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
Jakub Sitnickib673d6c2017-08-23 09:58:31 +0200439
David Ahernad1601a2019-03-27 20:53:56 -0700440 if (fl6->mp_hash <= atomic_read(&match->fib6_nh.fib_nh_upper_bound))
David Ahernb1d40992019-04-16 14:35:59 -0700441 goto out;
Ido Schimmelbbfcd772017-11-21 09:50:12 +0200442
David Ahern93c2fb22018-04-18 15:38:59 -0700443 list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
444 fib6_siblings) {
David Ahern702cea52019-04-09 14:41:13 -0700445 const struct fib6_nh *nh = &sibling->fib6_nh;
David Ahern5e670d82018-04-17 17:33:14 -0700446 int nh_upper_bound;
447
David Ahern702cea52019-04-09 14:41:13 -0700448 nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
David Ahern5e670d82018-04-17 17:33:14 -0700449 if (fl6->mp_hash > nh_upper_bound)
Ido Schimmel3d709f62018-01-09 16:40:27 +0200450 continue;
David Ahern702cea52019-04-09 14:41:13 -0700451 if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
Ido Schimmel3d709f62018-01-09 16:40:27 +0200452 break;
453 match = sibling;
454 break;
455 }
456
David Ahernb1d40992019-04-16 14:35:59 -0700457out:
458 res->f6i = match;
459 res->nh = &match->fib6_nh;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +0000460}
461
Linus Torvalds1da177e2005-04-16 15:20:36 -0700462/*
Wei Wang66f5d6c2017-10-06 12:06:10 -0700463 * Route lookup. rcu_read_lock() should be held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700464 */
465
David Ahern0c59d002019-04-09 14:41:18 -0700466static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
467 const struct in6_addr *saddr, int oif, int flags)
468{
469 const struct net_device *dev;
470
471 if (nh->fib_nh_flags & RTNH_F_DEAD)
472 return false;
473
474 dev = nh->fib_nh_dev;
475 if (oif) {
476 if (dev->ifindex == oif)
477 return true;
478 } else {
479 if (ipv6_chk_addr(net, saddr, dev,
480 flags & RT6_LOOKUP_F_IFACE))
481 return true;
482 }
483
484 return false;
485}
486
David Ahern75ef7382019-04-16 14:36:07 -0700487static void rt6_device_match(struct net *net, struct fib6_result *res,
488 const struct in6_addr *saddr, int oif, int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700489{
David Ahern75ef7382019-04-16 14:36:07 -0700490 struct fib6_info *f6i = res->f6i;
491 struct fib6_info *spf6i;
492 struct fib6_nh *nh;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493
David Ahern75ef7382019-04-16 14:36:07 -0700494 if (!oif && ipv6_addr_any(saddr)) {
495 nh = &f6i->fib6_nh;
David Ahern7d21fec2019-04-16 14:36:11 -0700496 if (!(nh->fib_nh_flags & RTNH_F_DEAD))
497 goto out;
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900498 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499
David Ahern75ef7382019-04-16 14:36:07 -0700500 for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
501 nh = &spf6i->fib6_nh;
502 if (__rt6_device_match(net, nh, saddr, oif, flags)) {
503 res->f6i = spf6i;
David Ahern7d21fec2019-04-16 14:36:11 -0700504 goto out;
David Ahern75ef7382019-04-16 14:36:07 -0700505 }
506 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507
David Ahern75ef7382019-04-16 14:36:07 -0700508 if (oif && flags & RT6_LOOKUP_F_IFACE) {
509 res->f6i = net->ipv6.fib6_null_entry;
David Ahern7d21fec2019-04-16 14:36:11 -0700510 nh = &res->f6i->fib6_nh;
511 goto out;
David Ahern75ef7382019-04-16 14:36:07 -0700512 }
513
David Ahern7d21fec2019-04-16 14:36:11 -0700514 nh = &f6i->fib6_nh;
515 if (nh->fib_nh_flags & RTNH_F_DEAD) {
David Ahern75ef7382019-04-16 14:36:07 -0700516 res->f6i = net->ipv6.fib6_null_entry;
David Ahern7d21fec2019-04-16 14:36:11 -0700517 nh = &res->f6i->fib6_nh;
David Ahern75ef7382019-04-16 14:36:07 -0700518 }
David Ahern7d21fec2019-04-16 14:36:11 -0700519out:
520 res->nh = nh;
521 res->fib6_type = res->f6i->fib6_type;
522 res->fib6_flags = res->f6i->fib6_flags;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700523}
524
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800525#ifdef CONFIG_IPV6_ROUTER_PREF
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200526struct __rt6_probe_work {
527 struct work_struct work;
528 struct in6_addr target;
529 struct net_device *dev;
530};
531
532static void rt6_probe_deferred(struct work_struct *w)
533{
534 struct in6_addr mcaddr;
535 struct __rt6_probe_work *work =
536 container_of(w, struct __rt6_probe_work, work);
537
538 addrconf_addr_solict_mult(&work->target, &mcaddr);
Erik Nordmarkadc176c2016-12-02 14:00:08 -0800539 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200540 dev_put(work->dev);
Michael Büsch662f5532015-02-08 10:14:07 +0100541 kfree(work);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200542}
543
David Aherncc3a86c2019-04-09 14:41:12 -0700544static void rt6_probe(struct fib6_nh *fib6_nh)
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800545{
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200546 struct __rt6_probe_work *work = NULL;
David Ahern5e670d82018-04-17 17:33:14 -0700547 const struct in6_addr *nh_gw;
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000548 struct neighbour *neigh;
David Ahern5e670d82018-04-17 17:33:14 -0700549 struct net_device *dev;
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200550 struct inet6_dev *idev;
David Ahern5e670d82018-04-17 17:33:14 -0700551
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800552 /*
553 * Okay, this does not seem to be appropriate
554 * for now, however, we need to check if it
555 * is really so; aka Router Reachability Probing.
556 *
557 * Router Reachability Probe MUST be rate-limited
558 * to no more than one per minute.
559 */
David Aherncc3a86c2019-04-09 14:41:12 -0700560 if (fib6_nh->fib_nh_gw_family)
Amerigo Wangfdd66812012-09-10 02:48:44 +0000561 return;
David Ahern5e670d82018-04-17 17:33:14 -0700562
David Aherncc3a86c2019-04-09 14:41:12 -0700563 nh_gw = &fib6_nh->fib_nh_gw6;
564 dev = fib6_nh->fib_nh_dev;
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000565 rcu_read_lock_bh();
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200566 idev = __in6_dev_get(dev);
David Ahern5e670d82018-04-17 17:33:14 -0700567 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000568 if (neigh) {
Martin KaFai Lau8d6c31b2015-07-24 09:57:43 -0700569 if (neigh->nud_state & NUD_VALID)
570 goto out;
571
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000572 write_lock(&neigh->lock);
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700573 if (!(neigh->nud_state & NUD_VALID) &&
574 time_after(jiffies,
David Aherndcd1f572018-04-18 15:39:05 -0700575 neigh->updated + idev->cnf.rtr_probe_interval)) {
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700576 work = kmalloc(sizeof(*work), GFP_ATOMIC);
577 if (work)
578 __neigh_set_probe_once(neigh);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200579 }
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000580 write_unlock(&neigh->lock);
David Aherncc3a86c2019-04-09 14:41:12 -0700581 } else if (time_after(jiffies, fib6_nh->last_probe +
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200582 idev->cnf.rtr_probe_interval)) {
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700583 work = kmalloc(sizeof(*work), GFP_ATOMIC);
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000584 }
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700585
586 if (work) {
David Aherncc3a86c2019-04-09 14:41:12 -0700587 fib6_nh->last_probe = jiffies;
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700588 INIT_WORK(&work->work, rt6_probe_deferred);
David Ahern5e670d82018-04-17 17:33:14 -0700589 work->target = *nh_gw;
590 dev_hold(dev);
591 work->dev = dev;
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700592 schedule_work(&work->work);
593 }
594
Martin KaFai Lau8d6c31b2015-07-24 09:57:43 -0700595out:
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000596 rcu_read_unlock_bh();
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800597}
598#else
David Aherncc3a86c2019-04-09 14:41:12 -0700599static inline void rt6_probe(struct fib6_nh *fib6_nh)
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800600{
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800601}
602#endif
603
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604/*
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800605 * Default Router Selection (RFC 2461 6.3.6)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700606 */
/* Classify the reachability of @fib6_nh's gateway neighbour entry.
 *
 * Return:
 *   RT6_NUD_SUCCEED    - neighbour is (probably) usable
 *   RT6_NUD_FAIL_PROBE - entry exists but is NUD_FAILED
 *                        (CONFIG_IPV6_ROUTER_PREF builds only)
 *   RT6_NUD_FAIL_DO_RR - no cached entry and no router preference;
 *                        caller should round-robin to another route
 *   RT6_NUD_FAIL_HARD  - entry exists but is in no usable state
 */
static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
					  &fib6_nh->fib_nh_gw6);
	if (neigh) {
		/* nud_state must be read under the neighbour lock */
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		/* No cached entry: with router preference enabled treat the
		 * gateway as reachable; otherwise request round-robin.
		 */
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634
/* Score a nexthop for route selection; higher is better.
 *
 * The score is 2 when the nexthop device matches @oif (or no oif was
 * requested), OR-ed with the decoded router preference shifted left by 2
 * on CONFIG_IPV6_ROUTER_PREF builds.  Returns RT6_NUD_FAIL_HARD when a
 * required interface does not match, or a negative RT6_NUD_FAIL_* value
 * propagated from the neighbour reachability check.
 */
static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
			   int strict)
{
	int m = 0;

	if (!oif || nh->fib_nh_dev->ifindex == oif)
		m = 2;

	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_LOOKUP_F_IFACE ? RT6_NUD_FAIL_HARD : RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
#endif
	/* Only gatewayed, non-RTF_NONEXTHOP routes need a reachability
	 * check, and only when the caller asked for reachable routers.
	 */
	if ((strict & RT6_LOOKUP_F_REACHABLE) &&
	    !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
		int n = rt6_check_neigh(nh);
		if (n < 0)
			return n;
	}
	return m;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656
/* Evaluate one nexthop as a round-robin selection candidate.
 *
 * Dead or (unless ignored) link-down nexthops are skipped.  When @nh
 * scores strictly better than *mpri, record the new best score and the
 * do-round-robin hint, and return true so the caller can record @nh as
 * the current best match.
 */
static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
		       int oif, int strict, int *mpri, bool *do_rr)
{
	bool match_do_rr = false;
	bool rc = false;
	int m;

	if (nh->fib_nh_flags & RTNH_F_DEAD)
		goto out;

	if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
	    nh->fib_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	m = rt6_score_route(nh, fib6_flags, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	/* kick off neighbour resolution for reachability-strict lookups */
	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(nh);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		rc = true;
	}
out:
	return rc;
}
692
/* Walk the fib6_next chain from @f6i_start (stopping before @nomatch),
 * scoring each unexpired entry's nexthop via find_match() and recording
 * the best in @res.  When @cont is non-NULL, the walk stops at the first
 * entry whose metric differs from @metric and reports it through *cont
 * so the caller can resume there later.  Runs under RCU (rcu_dereference
 * of fib6_next).
 */
static void __find_rr_leaf(struct fib6_info *f6i_start,
			   struct fib6_info *nomatch, u32 metric,
			   struct fib6_result *res, struct fib6_info **cont,
			   int oif, int strict, bool *do_rr, int *mpri)
{
	struct fib6_info *f6i;

	for (f6i = f6i_start;
	     f6i && f6i != nomatch;
	     f6i = rcu_dereference(f6i->fib6_next)) {
		struct fib6_nh *nh;

		/* metric boundary: remember where to continue */
		if (cont && f6i->fib6_metric != metric) {
			*cont = f6i;
			return;
		}

		if (fib6_check_expired(f6i))
			continue;

		nh = &f6i->fib6_nh;
		if (find_match(nh, f6i->fib6_flags, oif, strict, mpri, do_rr)) {
			res->f6i = f6i;
			res->nh = nh;
			res->fib6_flags = f6i->fib6_flags;
			res->fib6_type = f6i->fib6_type;
		}
	}
}
722
/* Round-robin leaf search: scan from @rr_head to the end of the chain,
 * then wrap from @leaf back up to @rr_head, all within rr_head's metric.
 * If nothing matched and a different-metric continuation point was found,
 * scan that tail as well.  Best match (if any) lands in @res.
 */
static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
			 struct fib6_info *rr_head, int oif, int strict,
			 bool *do_rr, struct fib6_result *res)
{
	u32 metric = rr_head->fib6_metric;
	struct fib6_info *cont = NULL;
	int mpri = -1;

	__find_rr_leaf(rr_head, NULL, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	__find_rr_leaf(leaf, rr_head, metric, res, &cont,
		       oif, strict, do_rr, &mpri);

	if (res->f6i || !cont)
		return;

	/* no same-metric match: try the lower-priority tail */
	__find_rr_leaf(cont, NULL, metric, res, NULL,
		       oif, strict, do_rr, &mpri);
}
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800743
/* Select a route from fib6_node @fn into @res, honouring round-robin
 * state (fn->rr_ptr).  Falls back to fib6_null_entry when nothing in
 * the node is usable.  Runs under RCU; rr_ptr updates are done under
 * the table's tb6_lock.
 */
static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
		       struct fib6_result *res, int strict)
{
	struct fib6_info *leaf = rcu_dereference(fn->leaf);
	struct fib6_info *rt0;
	bool do_rr = false;
	int key_plen;

	/* make sure this function or its helpers sets f6i */
	res->f6i = NULL;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		goto out;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not point to its child's leaf.
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->fib6_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->fib6_src.plen)
		key_plen = rt0->fib6_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		goto out;

	find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
	if (do_rr) {
		struct fib6_info *next = rcu_dereference(rt0->fib6_next);

		/* no entries matched; do round-robin */
		if (!next || next->fib6_metric != rt0->fib6_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->fib6_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->fib6_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->fib6_table->tb6_lock);
		}
	}

out:
	/* guarantee callers always get a fully-populated result */
	if (!res->f6i) {
		res->f6i = net->ipv6.fib6_null_entry;
		res->nh = &res->f6i->fib6_nh;
		res->fib6_flags = res->f6i->fib6_flags;
		res->fib6_type = res->f6i->fib6_type;
	}
}
800
David Ahern85bd05d2019-04-16 14:36:01 -0700801static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
Martin KaFai Lau8b9df262015-05-22 20:55:59 -0700802{
David Ahern85bd05d2019-04-16 14:36:01 -0700803 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
804 res->nh->fib_nh_gw_family;
Martin KaFai Lau8b9df262015-05-22 20:55:59 -0700805}
806
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800807#ifdef CONFIG_IPV6_ROUTE_INFO
/* Process a received route_info option (prefix, preference, lifetime)
 * from gateway @gwaddr on @dev: delete the corresponding RTF_ROUTEINFO
 * route when the lifetime is zero, refresh its preference and expiry
 * when it exists, or add it when it does not.
 *
 * Returns 0 on success (including "nothing to do"), -EINVAL on a
 * malformed option.
 */
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length:
	 * length 3 carries a full 128-bit prefix, length 2 at least 64
	 * bits, length 1 at least some prefix bits.
	 */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	/* prefix_len 0 designates the default router entry */
	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(net, rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		/* drop the lookup/add reference taken above */
		fib6_info_release(rt);
	}
	return 0;
}
881#endif
882
David Ahernae90d862018-04-17 17:33:12 -0700883/*
884 * Misc support functions
885 */
886
/* called with rcu_lock held
 *
 * Pick the device a cached copy of a local/anycast route should use:
 * the L3 master for enslaved devices (unless the destination requires
 * strict handling), the loopback device otherwise, and the device
 * itself when it is an L3 master or the route is not local/anycast.
 */
static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;

	if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&res->f6i->fib6_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}
909
/* Per-RTN_* route type: the errno reported through dst.error for
 * rejecting route types; 0 for types that deliver packets normally.
 */
static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};
924
/* Map an RTN_* route type to its dst.error value (0 if none). */
static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}
929
David Ahern8d1c8022018-04-17 17:33:26 -0700930static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
David Ahern3b6761d2018-04-17 17:33:20 -0700931{
932 unsigned short flags = 0;
933
934 if (rt->dst_nocount)
935 flags |= DST_NOCOUNT;
936 if (rt->dst_nopolicy)
937 flags |= DST_NOPOLICY;
938 if (rt->dst_host)
939 flags |= DST_HOST;
940
941 return flags;
942}
943
/* Wire up dst.error and the discard/prohibit input/output handlers for
 * a rejecting route type (blackhole, prohibit, throw, unreachable, ...).
 */
static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
{
	rt->dst.error = ip6_rt_type_to_error(fib6_type);

	switch (fib6_type) {
	case RTN_BLACKHOLE:
		/* silently drop */
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}
965
/* Initialize a dst's input/output handlers and lwtunnel state from the
 * fib lookup result: reject routes get error handlers, local/anycast
 * get ip6_input, multicast destinations ip6_mc_input, everything else
 * ip6_forward.
 */
static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;

	if (res->fib6_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, res->fib6_type);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	/* lightweight tunnel state, if the nexthop has one */
	if (res->nh->fib_nh_lws) {
		rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}
993
/* Caller must already hold reference to @from.
 * Link @rt to its originating fib6_info and share its metrics.
 */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
	rt->rt6i_flags &= ~RTF_EXPIRES;
	rcu_assign_pointer(rt->from, from);
	ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
}
1001
/* Caller must already hold reference to f6i in result.
 * Populate a freshly-allocated rt6_info from a fib lookup result:
 * dst handlers, destination/source prefixes, idev, flags and gateway.
 */
static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	const struct net_device *dev = nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;

	ip6_rt_init_dst(rt, res);

	rt->rt6i_dst = f6i->fib6_dst;
	rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
	rt->rt6i_flags = res->fib6_flags;
	if (nh->fib_nh_gw_family) {
		rt->rt6i_gateway = nh->fib_nh_gw6;
		rt->rt6i_flags |= RTF_GATEWAY;
	}
	rt6_set_from(rt, f6i);
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = f6i->fib6_src;
#endif
}
1023
/* Walk back up the fib trie from @fn looking for the next node that
 * carries route info (RTN_RTINFO), descending into a parent's subtree
 * when one exists.  Returns NULL at the tree root.  Runs under RCU
 * (rcu_dereference of fn->parent).
 */
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		/* re-enter the parent's source subtree unless we just
		 * came from it
		 */
		if (sn && sn != fn)
			fn = fib6_node_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}
Thomas Grafc71099a2006-08-04 23:20:06 -07001041
David Ahern10585b42019-03-20 09:24:50 -07001042static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
Wei Wangd3843fe2017-10-06 12:06:06 -07001043{
1044 struct rt6_info *rt = *prt;
1045
1046 if (dst_hold_safe(&rt->dst))
1047 return true;
David Ahern10585b42019-03-20 09:24:50 -07001048 if (net) {
Wei Wangd3843fe2017-10-06 12:06:06 -07001049 rt = net->ipv6.ip6_null_entry;
1050 dst_hold(&rt->dst);
1051 } else {
1052 rt = NULL;
1053 }
1054 *prt = rt;
1055 return false;
1056}
1057
/* called with rcu_lock held
 *
 * Allocate an uncached rt6_info for a lookup result.  Falls back to the
 * (referenced) ip6_null_entry when the fib6_info is being destroyed or
 * allocation fails, so callers always get a usable dst.
 */
static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
{
	struct net_device *dev = res->nh->fib_nh_dev;
	struct fib6_info *f6i = res->f6i;
	unsigned short flags;
	struct rt6_info *nrt;

	/* f6i may already be on its way out; only proceed if we can
	 * take a reference
	 */
	if (!fib6_info_hold_safe(f6i))
		goto fallback;

	flags = fib6_info_dst_flags(f6i);
	nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
	if (!nrt) {
		fib6_info_release(f6i);
		goto fallback;
	}

	ip6_rt_copy_init(nrt, res);
	return nrt;

fallback:
	nrt = dev_net(dev)->ipv6.ip6_null_entry;
	dst_hold(&nrt->dst);
	return nrt;
}
1084
/* Policy-rule lookup backend: find the best route for @fl6 in @table.
 * Walks the fib trie under RCU, backtracking until a usable node is
 * found; prefers a cached exception-table entry, otherwise builds a
 * fresh rt6_info.  Always returns a referenced dst (possibly the
 * null entry).
 */
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct fib6_result res = {};
	struct fib6_node *fn;
	struct rt6_info *rt;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	res.f6i = rcu_dereference(fn->leaf);
	if (!res.f6i)
		res.f6i = net->ipv6.fib6_null_entry;
	else
		rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
				 flags);

	if (res.f6i == net->ipv6.fib6_null_entry) {
		/* nothing usable here: retry from the next node up */
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;

		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
		goto out;
	}

	fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
			 fl6->flowi6_oif != 0, skb, flags);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);
	} else {
		rt = ip6_create_rt_rcu(&res);
	}

out:
	trace_fib6_table_lookup(net, &res, table, fl6);

	rcu_read_unlock();

	return rt;
}
1137
/* Public entry point: route @fl6 through the policy rules, using
 * ip6_pol_route_lookup() as the per-table lookup function.
 */
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
1144
/* Convenience lookup by daddr/saddr/oif.  Returns a referenced
 * rt6_info on success, or NULL (releasing the dst) when the lookup
 * resolved to an error route.  @strict forces an interface match.
 */
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
1170
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes the new route entry; if the addition fails for any reason,
 * the route is released.
 * The caller must hold a dst reference before calling it.
 */
1176
/* Insert @rt into its table under tb6_lock.  Returns the fib6_add()
 * result (0 on success, negative errno on failure).
 */
static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}
1190
/* Insert @rt with default netlink info for @net and no extack. */
int ip6_ins_rt(struct net *net, struct fib6_info *rt)
{
	struct nl_info info = { .nl_net = net, };

	return __ip6_ins_rt(rt, &info, NULL);
}
1197
/* Allocate a host-route (RTF_CACHE) clone of a lookup result for the
 * exact destination @daddr (and @saddr under CONFIG_IPV6_SUBTREES).
 * Returns NULL when the fib6_info is going away or allocation fails.
 */
static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 * Clone the route.
	 */

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(res)) {
		/* a non-host route whose prefix address equals daddr is
		 * an anycast address (RFC semantics) - TODO confirm
		 */
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240
/* Allocate a per-CPU (RTF_PCPU) rt6_info for a lookup result.
 * Returns NULL when the fib6_info is going away or allocation fails.
 */
static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	if (!fib6_info_hold_safe(f6i))
		return NULL;

	/* device selection must happen under RCU */
	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}
1263
Wei Wang66f5d6c2017-10-06 12:06:10 -07001264/* It should be called with rcu_read_lock() acquired */
David Aherndb3fede2019-04-16 14:36:03 -07001265static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001266{
Martin KaFai Laua73e4192015-08-14 11:05:53 -07001267 struct rt6_info *pcpu_rt, **p;
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001268
David Aherndb3fede2019-04-16 14:36:03 -07001269 p = this_cpu_ptr(res->f6i->rt6i_pcpu);
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001270 pcpu_rt = *p;
1271
David Ahernd4ead6b2018-04-17 17:33:16 -07001272 if (pcpu_rt)
David Ahern10585b42019-03-20 09:24:50 -07001273 ip6_hold_safe(NULL, &pcpu_rt);
Wei Wangd3843fe2017-10-06 12:06:06 -07001274
Martin KaFai Laua73e4192015-08-14 11:05:53 -07001275 return pcpu_rt;
1276}
1277
/* Create and install the per-cpu cached route for this CPU.
 * On allocation failure the (refcounted) null entry is returned so the
 * caller always gets a usable dst.  Called with BH disabled (see
 * ip6_pol_route()), so no one else can race us for this CPU's slot.
 */
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt) {
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(res->f6i->rt6i_pcpu);
	/* slot must be empty: BH is off, so nothing else on this CPU can
	 * have installed an entry between lookup and here
	 */
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		/* f6i teardown is already in flight and may have missed the
		 * entry we just published; drop our 'from' reference here so
		 * it cannot outlive the fib6_info — NOTE(review): presumably
		 * pairs with the pcpu walk in fib6_info destruction; confirm.
		 */
		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}
1303
/* exception hash table implementation
 */

/* Serializes all writers of every fib6_info's exception bucket list. */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	/* drop a reference on the cached dst */
	dst_release(&rt6_ex->rt6i->dst);
	/* RCU readers may still be walking the chain; free after grace period */
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}
1336
1337/* Remove oldest rt6_ex in bucket and free the memory
1338 * Caller must hold rt6_exception_lock
1339 */
1340static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1341{
1342 struct rt6_exception *rt6_ex, *oldest = NULL;
1343
1344 if (!bucket)
1345 return;
1346
1347 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1348 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1349 oldest = rt6_ex;
1350 }
1351 rt6_remove_exception(bucket, oldest);
1352}
1353
1354static u32 rt6_exception_hash(const struct in6_addr *dst,
1355 const struct in6_addr *src)
1356{
1357 static u32 seed __read_mostly;
1358 u32 val;
1359
1360 net_get_random_once(&seed, sizeof(seed));
1361 val = jhash(dst, sizeof(*dst), seed);
1362
1363#ifdef CONFIG_IPV6_SUBTREES
1364 if (src)
1365 val = jhash(src, sizeof(*src), val);
1366#endif
1367 return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
1368}
1369
1370/* Helper function to find the cached rt in the hash table
1371 * and update bucket pointer to point to the bucket for this
1372 * (daddr, saddr) pair
1373 * Caller must hold rt6_exception_lock
1374 */
1375static struct rt6_exception *
1376__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
1377 const struct in6_addr *daddr,
1378 const struct in6_addr *saddr)
1379{
1380 struct rt6_exception *rt6_ex;
1381 u32 hval;
1382
1383 if (!(*bucket) || !daddr)
1384 return NULL;
1385
1386 hval = rt6_exception_hash(daddr, saddr);
1387 *bucket += hval;
1388
1389 hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
1390 struct rt6_info *rt6 = rt6_ex->rt6i;
1391 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1392
1393#ifdef CONFIG_IPV6_SUBTREES
1394 if (matched && saddr)
1395 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1396#endif
1397 if (matched)
1398 return rt6_ex;
1399 }
1400 return NULL;
1401}
1402
1403/* Helper function to find the cached rt in the hash table
1404 * and update bucket pointer to point to the bucket for this
1405 * (daddr, saddr) pair
1406 * Caller must hold rcu_read_lock()
1407 */
1408static struct rt6_exception *
1409__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
1410 const struct in6_addr *daddr,
1411 const struct in6_addr *saddr)
1412{
1413 struct rt6_exception *rt6_ex;
1414 u32 hval;
1415
1416 WARN_ON_ONCE(!rcu_read_lock_held());
1417
1418 if (!(*bucket) || !daddr)
1419 return NULL;
1420
1421 hval = rt6_exception_hash(daddr, saddr);
1422 *bucket += hval;
1423
1424 hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
1425 struct rt6_info *rt6 = rt6_ex->rt6i;
1426 bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);
1427
1428#ifdef CONFIG_IPV6_SUBTREES
1429 if (matched && saddr)
1430 matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
1431#endif
1432 if (matched)
1433 return rt6_ex;
1434 }
1435 return NULL;
1436}
1437
David Ahernb748f262019-04-16 14:36:06 -07001438static unsigned int fib6_mtu(const struct fib6_result *res)
Wei Wang35732d02017-10-06 12:05:57 -07001439{
David Ahernb748f262019-04-16 14:36:06 -07001440 const struct fib6_nh *nh = res->nh;
David Ahernd4ead6b2018-04-17 17:33:16 -07001441 unsigned int mtu;
1442
David Ahernb748f262019-04-16 14:36:06 -07001443 if (res->f6i->fib6_pmtu) {
1444 mtu = res->f6i->fib6_pmtu;
David Aherndcd1f572018-04-18 15:39:05 -07001445 } else {
David Ahernb748f262019-04-16 14:36:06 -07001446 struct net_device *dev = nh->fib_nh_dev;
David Aherndcd1f572018-04-18 15:39:05 -07001447 struct inet6_dev *idev;
1448
1449 rcu_read_lock();
1450 idev = __in6_dev_get(dev);
1451 mtu = idev->cnf.mtu6;
1452 rcu_read_unlock();
1453 }
1454
David Ahernd4ead6b2018-04-17 17:33:16 -07001455 mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
1456
David Ahernb748f262019-04-16 14:36:06 -07001457 return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
David Ahernd4ead6b2018-04-17 17:33:16 -07001458}
1459
/* Insert cached route @nrt into the exception table of res->f6i,
 * allocating the bucket array on first use.  Replaces any existing
 * entry for the same (dst, src) pair and evicts the oldest entry when
 * a chain grows past FIB6_MAX_DEPTH.  On success the table sernum is
 * bumped so stale cached dsts are invalidated.
 * Returns 0 or a negative errno.
 */
static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_info *f6i = res->f6i;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	/* rt6_flush_exceptions() has already run; don't recreate buckets */
	if (f6i->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(f6i->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(f6i->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
		err = -EINVAL;
		goto out;
	}

	/* at most one exception per (dst, src): drop any stale entry first */
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}
1540
David Ahern8d1c8022018-04-17 17:33:26 -07001541void rt6_flush_exceptions(struct fib6_info *rt)
Wei Wang35732d02017-10-06 12:05:57 -07001542{
1543 struct rt6_exception_bucket *bucket;
1544 struct rt6_exception *rt6_ex;
1545 struct hlist_node *tmp;
1546 int i;
1547
1548 spin_lock_bh(&rt6_exception_lock);
1549 /* Prevent rt6_insert_exception() to recreate the bucket list */
1550 rt->exception_bucket_flushed = 1;
1551
1552 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1553 lockdep_is_held(&rt6_exception_lock));
1554 if (!bucket)
1555 goto out;
1556
1557 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1558 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
1559 rt6_remove_exception(bucket, rt6_ex);
1560 WARN_ON_ONCE(bucket->depth);
1561 bucket++;
1562 }
1563
1564out:
1565 spin_unlock_bh(&rt6_exception_lock);
1566}
1567
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 * Returns the cached (non-expired) rt6_info, or NULL.  No reference is
 * taken; the caller decides whether to hold the result.
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6i_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
	bucket = rcu_dereference(res->f6i->rt6i_exception_bucket);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	/* expired entries are ignored here; GC will purge them later */
	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

	return ret;
}
1612
/* Remove the passed in cached rt from the hash table that contains it
 * Returns 0 on success, -EINVAL if @rt is not a cached route, -ENOENT
 * if it is no longer present in its owner's exception table.
 */
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_info *from;
	int err;

	/* NOTE(review): rcu_dereference() here implies callers run under
	 * rcu_read_lock() (or otherwise pin 'from') — confirm at call sites
	 */
	from = rcu_dereference(rt->from);
	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->fib6_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}
1656
1657/* Find rt6_ex which contains the passed in rt cache and
1658 * refresh its stamp
1659 */
1660static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
1661{
Wei Wang35732d02017-10-06 12:05:57 -07001662 struct rt6_exception_bucket *bucket;
1663 struct in6_addr *src_key = NULL;
1664 struct rt6_exception *rt6_ex;
Paolo Abeni193f3682019-02-21 11:19:41 +01001665 struct fib6_info *from;
Wei Wang35732d02017-10-06 12:05:57 -07001666
1667 rcu_read_lock();
Paolo Abeni193f3682019-02-21 11:19:41 +01001668 from = rcu_dereference(rt->from);
1669 if (!from || !(rt->rt6i_flags & RTF_CACHE))
1670 goto unlock;
1671
Wei Wang35732d02017-10-06 12:05:57 -07001672 bucket = rcu_dereference(from->rt6i_exception_bucket);
1673
1674#ifdef CONFIG_IPV6_SUBTREES
1675 /* rt6i_src.plen != 0 indicates 'from' is in subtree
1676 * and exception table is indexed by a hash of
1677 * both rt6i_dst and rt6i_src.
1678 * Otherwise, the exception table is indexed by
1679 * a hash of only rt6i_dst.
1680 */
David Ahern93c2fb22018-04-18 15:38:59 -07001681 if (from->fib6_src.plen)
Wei Wang35732d02017-10-06 12:05:57 -07001682 src_key = &rt->rt6i_src.addr;
1683#endif
1684 rt6_ex = __rt6_find_exception_rcu(&bucket,
1685 &rt->rt6i_dst.addr,
1686 src_key);
1687 if (rt6_ex)
1688 rt6_ex->stamp = jiffies;
1689
Paolo Abeni193f3682019-02-21 11:19:41 +01001690unlock:
Wei Wang35732d02017-10-06 12:05:57 -07001691 rcu_read_unlock();
1692}
1693
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001694static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1695 struct rt6_info *rt, int mtu)
1696{
1697 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1698 * lowest MTU in the path: always allow updating the route PMTU to
1699 * reflect PMTU decreases.
1700 *
1701 * If the new MTU is higher, and the route PMTU is equal to the local
1702 * MTU, this means the old MTU is the lowest in the path, so allow
1703 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1704 * handle this.
1705 */
1706
1707 if (dst_mtu(&rt->dst) >= mtu)
1708 return true;
1709
1710 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1711 return true;
1712
1713 return false;
1714}
1715
1716static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
David Ahern8d1c8022018-04-17 17:33:26 -07001717 struct fib6_info *rt, int mtu)
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001718{
1719 struct rt6_exception_bucket *bucket;
1720 struct rt6_exception *rt6_ex;
1721 int i;
1722
1723 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1724 lockdep_is_held(&rt6_exception_lock));
1725
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001726 if (!bucket)
1727 return;
1728
1729 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1730 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1731 struct rt6_info *entry = rt6_ex->rt6i;
1732
1733 /* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
David Ahernd4ead6b2018-04-17 17:33:16 -07001734 * route), the metrics of its rt->from have already
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001735 * been updated.
1736 */
David Ahernd4ead6b2018-04-17 17:33:16 -07001737 if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001738 rt6_mtu_change_route_allowed(idev, entry, mtu))
David Ahernd4ead6b2018-04-17 17:33:16 -07001739 dst_metric_set(&entry->dst, RTAX_MTU, mtu);
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001740 }
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001741 bucket++;
Wei Wangf5bbe7e2017-10-06 12:05:59 -07001742 }
1743}
1744
Wei Wangb16cb452017-10-06 12:06:00 -07001745#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
1746
David Ahern8d1c8022018-04-17 17:33:26 -07001747static void rt6_exceptions_clean_tohost(struct fib6_info *rt,
Wei Wangb16cb452017-10-06 12:06:00 -07001748 struct in6_addr *gateway)
1749{
1750 struct rt6_exception_bucket *bucket;
1751 struct rt6_exception *rt6_ex;
1752 struct hlist_node *tmp;
1753 int i;
1754
1755 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1756 return;
1757
1758 spin_lock_bh(&rt6_exception_lock);
1759 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1760 lockdep_is_held(&rt6_exception_lock));
1761
1762 if (bucket) {
1763 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1764 hlist_for_each_entry_safe(rt6_ex, tmp,
1765 &bucket->chain, hlist) {
1766 struct rt6_info *entry = rt6_ex->rt6i;
1767
1768 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
1769 RTF_CACHE_GATEWAY &&
1770 ipv6_addr_equal(gateway,
1771 &entry->rt6i_gateway)) {
1772 rt6_remove_exception(bucket, rt6_ex);
1773 }
1774 }
1775 bucket++;
1776 }
1777 }
1778
1779 spin_unlock_bh(&rt6_exception_lock);
1780}
1781
/* Examine one exception entry during GC: remove it if aged out, expired,
 * or routed via a gateway no longer flagged as a router; otherwise count
 * it in gc_args->more.  Caller holds rt6_exception_lock and
 * rcu_read_lock_bh() (see rt6_age_exceptions()).
 */
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		/* _noref lookup is safe: caller holds rcu_read_lock_bh() */
		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}
1825
David Ahern8d1c8022018-04-17 17:33:26 -07001826void rt6_age_exceptions(struct fib6_info *rt,
Wei Wangc757faa2017-10-06 12:06:01 -07001827 struct fib6_gc_args *gc_args,
1828 unsigned long now)
1829{
1830 struct rt6_exception_bucket *bucket;
1831 struct rt6_exception *rt6_ex;
1832 struct hlist_node *tmp;
1833 int i;
1834
1835 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1836 return;
1837
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001838 rcu_read_lock_bh();
1839 spin_lock(&rt6_exception_lock);
Wei Wangc757faa2017-10-06 12:06:01 -07001840 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1841 lockdep_is_held(&rt6_exception_lock));
1842
1843 if (bucket) {
1844 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1845 hlist_for_each_entry_safe(rt6_ex, tmp,
1846 &bucket->chain, hlist) {
1847 rt6_age_examine_exception(bucket, rt6_ex,
1848 gc_args, now);
1849 }
1850 bucket++;
1851 }
1852 }
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001853 spin_unlock(&rt6_exception_lock);
1854 rcu_read_unlock_bh();
Wei Wangc757faa2017-10-06 12:06:01 -07001855}
1856
/* must be called with rcu lock held
 * Resolve fl6 against @table into @res.  Always returns 0; a failed
 * lookup is signalled by res->f6i == net->ipv6.fib6_null_entry.
 */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	/* descend the trie to the most-specific node for daddr (and saddr) */
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		/* no usable route at this node: backtrack to less-specific
		 * prefixes; once exhausted, retry the whole walk without the
		 * REACHABLE restriction as a last resort
		 */
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}
1887
/* Core policy-routing lookup: resolve fl6 to a dst with a reference
 * held for the caller.  Depending on the result this returns, in order
 * of preference:
 *   1. a cached exception route (RTF_CACHE),
 *   2. a one-off uncached RTF_CACHE clone (KNOWN_NH without a gateway),
 *   3. the per-cpu copy of the fib6 route (created on demand).
 * Never returns NULL; failures yield the refcounted null entry.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	/* a non-forwarding host should prefer reachable routers */
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		rcu_read_unlock();
		dst_hold(&rt->dst);
		return rt;
	}

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/* Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		if (ip6_hold_safe(net, &rt))
			dst_use_noref(&rt->dst, jiffies);

		rcu_read_unlock();
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

		uncached_rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		rcu_read_unlock();

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		return uncached_rt;
	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		/* BH off: guarantees exclusive access to this CPU's pcpu
		 * slot (see rt6_make_pcpu_route())
		 */
		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(&res);

		if (!pcpu_rt)
			pcpu_rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
		rcu_read_unlock();

		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
Thomas Grafc71099a2006-08-04 23:20:06 -07001964
David Ahernb75cc8f2018-03-02 08:32:17 -08001965static struct rt6_info *ip6_pol_route_input(struct net *net,
1966 struct fib6_table *table,
1967 struct flowi6 *fl6,
1968 const struct sk_buff *skb,
1969 int flags)
Pavel Emelyanov4acad722007-10-15 13:02:51 -07001970{
David Ahernb75cc8f2018-03-02 08:32:17 -08001971 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
Pavel Emelyanov4acad722007-10-15 13:02:51 -07001972}
1973
Mahesh Bandeward409b842016-09-16 12:59:08 -07001974struct dst_entry *ip6_route_input_lookup(struct net *net,
1975 struct net_device *dev,
David Ahernb75cc8f2018-03-02 08:32:17 -08001976 struct flowi6 *fl6,
1977 const struct sk_buff *skb,
1978 int flags)
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001979{
1980 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
1981 flags |= RT6_LOOKUP_F_IFACE;
1982
David Ahernb75cc8f2018-03-02 08:32:17 -08001983 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001984}
Mahesh Bandeward409b842016-09-16 12:59:08 -07001985EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001986
/* Extract the L3 flow keys (addresses, flow label, protocol) used for
 * multipath hashing from @skb into @keys.
 *
 * For ICMPv6 *error* packets the keys are taken from the inner
 * (offending) header so error replies hash onto the same path as the
 * original flow.  Pre-dissected @flkeys, when supplied, are preferred
 * — but only in the non-error case, since they describe the outer
 * header.
 */
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	/* only ICMPv6 error messages embed the offending packet */
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;	/* dissected keys describe the outer header; ignore */
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}
2034
/* Compute the hash used to pick among equal-cost multipath nexthops,
 * according to the per-netns multipath hash policy (0 = L3, 1 = L4).
 * if skb is set it will be used and fl6 can be NULL
 */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		/* L3 policy: addresses, flow label and next header only */
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		/* L4 policy: addresses plus transport ports */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	/* shift matches the skb->l4_hash short-circuit above */
	return mhash >> 1;
}
2091
/* Route an incoming packet: build a flowi6 from the IPv6 header (plus any
 * tunnel collect-metadata), compute the multipath hash for ICMPv6, then
 * attach the looked-up dst to @skb.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	/* receive-side tunnel metadata keys the lookup by tunnel id */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	/* ICMPv6 errors should follow the flow they refer to; see
	 * ip6_multipath_l3_keys()
	 */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set(skb,
		    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}
2121
David Ahernb75cc8f2018-03-02 08:32:17 -08002122static struct rt6_info *ip6_pol_route_output(struct net *net,
2123 struct fib6_table *table,
2124 struct flowi6 *fl6,
2125 const struct sk_buff *skb,
2126 int flags)
Thomas Grafc71099a2006-08-04 23:20:06 -07002127{
David Ahernb75cc8f2018-03-02 08:32:17 -08002128 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
Thomas Grafc71099a2006-08-04 23:20:06 -07002129}
2130
/* Resolve an output route for @fl6.  Multicast/link-local destinations are
 * first offered to an L3 master device lookup; otherwise fall through to
 * the policy-routing lookup with strictness flags derived from the socket
 * binding and source address.
 */
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (ipv6_addr_type(&fl6->daddr) &
	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	/* output path: no meaningful input interface */
	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		/* no source yet: honour the socket's address preferences */
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160
/* Clone @dst_orig into a "blackhole" route on the loopback device: same
 * metrics, addresses and flags, but both input and output discard every
 * packet.  Consumes the reference on @dst_orig; returns the new dst or
 * ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		/* discard in both directions */
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* clone is standalone: drop the per-cpu marker */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}
David S. Miller14e50e52007-05-24 18:17:54 -07002193
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194/*
2195 * Destination cache support functions
2196 */
2197
David Ahern8d1c8022018-04-17 17:33:26 -07002198static bool fib6_check(struct fib6_info *f6i, u32 cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002199{
Steffen Klassert36143642017-08-25 09:05:42 +02002200 u32 rt_cookie = 0;
Wei Wangc5cff852017-08-21 09:47:10 -07002201
David Ahern8ae86972018-04-20 15:38:03 -07002202 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
David Ahern93531c62018-04-17 17:33:25 -07002203 return false;
2204
2205 if (fib6_check_expired(f6i))
2206 return false;
2207
2208 return true;
2209}
2210
David Aherna68886a2018-04-20 15:38:02 -07002211static struct dst_entry *rt6_check(struct rt6_info *rt,
2212 struct fib6_info *from,
2213 u32 cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002214{
Wei Wangc5cff852017-08-21 09:47:10 -07002215 u32 rt_cookie = 0;
2216
David Aherna68886a2018-04-20 15:38:02 -07002217 if ((from && !fib6_get_cookie_safe(from, &rt_cookie)) ||
David Ahern93531c62018-04-17 17:33:25 -07002218 rt_cookie != cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002219 return NULL;
2220
2221 if (rt6_check_expired(rt))
2222 return NULL;
2223
2224 return &rt->dst;
2225}
2226
David Aherna68886a2018-04-20 15:38:02 -07002227static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2228 struct fib6_info *from,
2229 u32 cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002230{
Martin KaFai Lau5973fb12015-11-11 11:51:07 -08002231 if (!__rt6_check_expired(rt) &&
2232 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
David Aherna68886a2018-04-20 15:38:02 -07002233 fib6_check(from, cookie))
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002234 return &rt->dst;
2235 else
2236 return NULL;
2237}
2238
/* dst_ops->check for IPv6: return @dst if it is still valid for use,
 * NULL if the caller must perform a fresh route lookup.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	/* pcpu and uncached routes derive validity from their parent
	 * fib entry; plain cached routes are checked directly
	 */
	if (from && (rt->rt6i_flags & RTF_PCPU ||
		     unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}
2266
/* dst_ops->negative_advice: an upper layer suspects this route is bad.
 * Expired cached exception routes are removed from the exception table;
 * non-cached routes are simply released so the next use re-looks-up.
 * Returns the (possibly NULLed) dst back to the caller.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
2286
/* Called when a packet could not be delivered over its route.  Send a
 * destination-unreachable error, then invalidate the route: a cached
 * exception is removed outright, otherwise the originating fib node's
 * sernum is poisoned so cached dsts fail their next cookie check.
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				/* -1 forces a cookie mismatch on next check */
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}
2312
/* Arm (or re-arm) the expiry on a cached rt6_info.  If the route does not
 * already carry RTF_EXPIRES, seed dst.expires from its parent fib6_info
 * first, then apply @timeout via dst_set_expires().
 */
static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}
2328
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002329static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
2330{
2331 struct net *net = dev_net(rt->dst.dev);
2332
David Ahernd4ead6b2018-04-17 17:33:16 -07002333 dst_metric_set(&rt->dst, RTAX_MTU, mtu);
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002334 rt->rt6i_flags |= RTF_MODIFIED;
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002335 rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
2336}
2337
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002338static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2339{
2340 return !(rt->rt6i_flags & RTF_CACHE) &&
Paolo Abeni1490ed22019-02-15 18:15:37 +01002341 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002342}
2343
/* Core PMTU update.  @iph or @sk (either may be NULL) supplies the flow's
 * addresses for neighbour confirmation and exception-route keying.  A
 * cached route is updated in place; otherwise a new cached exception
 * carrying the reduced MTU is created and inserted under RCU.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	/* clamp to the IPv6 minimum and only ever shrink the MTU */
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_result res = {};
		struct rt6_info *nrt6;

		rcu_read_lock();
		res.f6i = rcu_dereference(rt6->from);
		if (!res.f6i) {
			rcu_read_unlock();
			return;
		}
		res.nh = &res.f6i->fib6_nh;
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;

		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			/* insertion failed: nothing references nrt6, free it */
			if (rt6_insert_exception(nrt6, &res))
				dst_release_immediate(&nrt6->dst);
		}
		rcu_read_unlock();
	}
}
2396
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002397static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2398 struct sk_buff *skb, u32 mtu)
2399{
2400 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2401}
2402
/* Apply a PMTU update for the flow described by @skb's IPv6 header:
 * look up the output route and record the reduced @mtu (network byte
 * order) against it.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		/* fall back to the reply mark when no explicit mark given */
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2423
/* Socket variant of ip6_update_pmtu(): derive oif/mark/uid from @sk, then
 * refresh the socket's cached dst if the update invalidated it.
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	/* unbound socket: fall back to the skb device's L3 master */
	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	/* if the cached dst still validates, nothing more to do */
	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2445
/* Cache @dst on @sk via ip6_dst_store(), passing the socket's destination
 * (and, with subtrees, source) address pointer only when it matches the
 * flow's, NULL otherwise.
 */
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}
2462
/* Does the redirect received from gateway @gw apply to nexthop @res->nh
 * for flow @fl6?  On a gateway mismatch the route's exception cache is
 * also searched; a matching cached route is returned through *@ret.
 */
static bool ip6_redirect_nh_match(const struct fib6_result *res,
				  struct flowi6 *fl6,
				  const struct in6_addr *gw,
				  struct rt6_info **ret)
{
	const struct fib6_nh *nh = res->nh;

	/* nexthop must be alive, via a gateway, and on the flow's device */
	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
		return false;

	/* rt_cache's gateway might be different from its 'parent'
	 * in the case of an ip redirect.
	 * So we keep searching in the exception table if the gateway
	 * is different.
	 */
	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
		struct rt6_info *rt_cache;

		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
		if (rt_cache &&
		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
			*ret = rt_cache;
			return true;
		}
		return false;
	}
	return true;
}
2492
/* Handle redirects */
struct ip6rd_flowi {
	/* must stay first: __ip6_route_redirect() casts flowi6* back to
	 * ip6rd_flowi*
	 */
	struct flowi6 fl6;
	struct in6_addr gateway;	/* router that sent the redirect */
};
2498
2499static struct rt6_info *__ip6_route_redirect(struct net *net,
2500 struct fib6_table *table,
2501 struct flowi6 *fl6,
David Ahernb75cc8f2018-03-02 08:32:17 -08002502 const struct sk_buff *skb,
Duan Jiongb55b76b2013-09-04 19:44:21 +08002503 int flags)
2504{
2505 struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
David Ahern0b34eb02019-04-09 14:41:19 -07002506 struct rt6_info *ret = NULL;
David Ahern9b6b35a2019-04-16 14:36:02 -07002507 struct fib6_result res = {};
David Ahern8d1c8022018-04-17 17:33:26 -07002508 struct fib6_info *rt;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002509 struct fib6_node *fn;
2510
David Ahern31680ac2019-05-22 15:12:18 -07002511 /* l3mdev_update_flow overrides oif if the device is enslaved; in
2512 * this case we must match on the real ingress device, so reset it
2513 */
2514 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
2515 fl6->flowi6_oif = skb->dev->ifindex;
2516
Duan Jiongb55b76b2013-09-04 19:44:21 +08002517 /* Get the "current" route for this destination and
Alexander Alemayhu67c408c2017-01-07 23:53:00 +01002518 * check if the redirect has come from appropriate router.
Duan Jiongb55b76b2013-09-04 19:44:21 +08002519 *
2520 * RFC 4861 specifies that redirects should only be
2521 * accepted if they come from the nexthop to the target.
2522 * Due to the way the routes are chosen, this notion
2523 * is a bit fuzzy and one might need to check all possible
2524 * routes.
2525 */
2526
Wei Wang66f5d6c2017-10-06 12:06:10 -07002527 rcu_read_lock();
David Ahern64547432018-05-09 20:34:19 -07002528 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
Duan Jiongb55b76b2013-09-04 19:44:21 +08002529restart:
Wei Wang66f5d6c2017-10-06 12:06:10 -07002530 for_each_fib6_node_rt_rcu(fn) {
David Ahern9b6b35a2019-04-16 14:36:02 -07002531 res.f6i = rt;
2532 res.nh = &rt->fib6_nh;
2533
David Ahern14895682018-04-17 17:33:17 -07002534 if (fib6_check_expired(rt))
Duan Jiongb55b76b2013-09-04 19:44:21 +08002535 continue;
David Ahern93c2fb22018-04-18 15:38:59 -07002536 if (rt->fib6_flags & RTF_REJECT)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002537 break;
David Ahern9b6b35a2019-04-16 14:36:02 -07002538 if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway, &ret))
David Ahern0b34eb02019-04-09 14:41:19 -07002539 goto out;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002540 }
2541
2542 if (!rt)
David Ahern421842e2018-04-17 17:33:18 -07002543 rt = net->ipv6.fib6_null_entry;
David Ahern93c2fb22018-04-18 15:38:59 -07002544 else if (rt->fib6_flags & RTF_REJECT) {
David Ahern23fb93a2018-04-17 17:33:23 -07002545 ret = net->ipv6.ip6_null_entry;
Martin KaFai Laub0a1ba52015-01-20 19:16:02 -08002546 goto out;
2547 }
2548
David Ahern421842e2018-04-17 17:33:18 -07002549 if (rt == net->ipv6.fib6_null_entry) {
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07002550 fn = fib6_backtrack(fn, &fl6->saddr);
2551 if (fn)
2552 goto restart;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002553 }
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07002554
David Ahern9b6b35a2019-04-16 14:36:02 -07002555 res.f6i = rt;
2556 res.nh = &rt->fib6_nh;
Martin KaFai Laub0a1ba52015-01-20 19:16:02 -08002557out:
David Ahern7d21fec2019-04-16 14:36:11 -07002558 if (ret) {
David Ahern10585b42019-03-20 09:24:50 -07002559 ip6_hold_safe(net, &ret);
David Ahern7d21fec2019-04-16 14:36:11 -07002560 } else {
2561 res.fib6_flags = res.f6i->fib6_flags;
2562 res.fib6_type = res.f6i->fib6_type;
David Ahern9b6b35a2019-04-16 14:36:02 -07002563 ret = ip6_create_rt_rcu(&res);
David Ahern7d21fec2019-04-16 14:36:11 -07002564 }
Duan Jiongb55b76b2013-09-04 19:44:21 +08002565
Wei Wang66f5d6c2017-10-06 12:06:10 -07002566 rcu_read_unlock();
Duan Jiongb55b76b2013-09-04 19:44:21 +08002567
David Ahern8ff2e5b2019-04-16 14:36:09 -07002568 trace_fib6_table_lookup(net, &res, table, fl6);
David Ahern23fb93a2018-04-17 17:33:23 -07002569 return ret;
Duan Jiongb55b76b2013-09-04 19:44:21 +08002570};
2571
2572static struct dst_entry *ip6_route_redirect(struct net *net,
David Ahernb75cc8f2018-03-02 08:32:17 -08002573 const struct flowi6 *fl6,
2574 const struct sk_buff *skb,
2575 const struct in6_addr *gateway)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002576{
2577 int flags = RT6_LOOKUP_F_HAS_SADDR;
2578 struct ip6rd_flowi rdfl;
2579
2580 rdfl.fl6 = *fl6;
2581 rdfl.gateway = *gateway;
2582
David Ahernb75cc8f2018-03-02 08:32:17 -08002583 return fib6_rule_lookup(net, &rdfl.fl6, skb,
Duan Jiongb55b76b2013-09-04 19:44:21 +08002584 flags, __ip6_route_redirect);
2585}
2586
/* Process a received redirect for the packet in @skb: validate/look up the
 * redirect route for its addresses and apply it via rt6_do_redirect().
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);
2607
/* Variant of ip6_redirect() driven by the ICMPv6 redirect message itself:
 * the flow destination comes from the rd_msg target.
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.daddr = msg->dest,
		/* NOTE(review): saddr deliberately taken from the outer
		 * header's daddr here — confirm against rt6_do_redirect()
		 */
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
2625
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002626void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2627{
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002628 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2629 sk->sk_uid);
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002630}
2631EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2632
David S. Miller0dbaee32010-12-13 12:52:14 -08002633static unsigned int ip6_default_advmss(const struct dst_entry *dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634{
David S. Miller0dbaee32010-12-13 12:52:14 -08002635 struct net_device *dev = dst->dev;
2636 unsigned int mtu = dst_mtu(dst);
2637 struct net *net = dev_net(dev);
2638
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);
2640
Daniel Lezcano55786892008-03-04 13:47:47 -08002641 if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
2642 mtu = net->ipv6.sysctl.ip6_rt_min_advmss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643
2644 /*
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09002645 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
2646 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
2647 * IPV6_MAXPLEN is also valid and means: "any MSS,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 * rely only on pmtu discovery"
2649 */
2650 if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
2651 mtu = IPV6_MAXPLEN;
2652 return mtu;
2653}
2654
/* dst_ops->mtu for IPv6: an explicit RTAX_MTU metric wins; otherwise use
 * the egress device's configured mtu6 (IPV6_MIN_MTU if no inet6_dev).
 * The result is capped at IP6_MAX_MTU and reduced by lwtunnel headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
2677
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr)
{
	const struct fib6_nh *nh = res->nh;
	struct fib6_info *f6i = res->f6i;
	struct inet6_dev *idev;
	struct rt6_info *rt;
	u32 mtu = 0;

	/* 1. a locked MTU metric wins; skip the IP6_MAX_MTU cap below
	 * and go straight to the lwtunnel headroom adjustment.
	 */
	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	/* 2. a cached (PMTU exception) route for this src/dst pair */
	rt = rt6_find_cached_rt(res, daddr, saddr);
	if (unlikely(rt)) {
		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
	} else {
		/* 3. egress device mtu6, never below IPV6_MIN_MTU */
		struct net_device *dev = nh->fib_nh_dev;

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	/* leave room for tunnel encapsulation headers, if any */
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
2718
/* Allocate a standalone (uncached) host dst for sending an ICMPv6
 * packet out of @dev to fl6->daddr.  The dst is placed on the
 * uncached list so device teardown can release it.  Returns the dst
 * (possibly transformed by xfrm_lookup()) or an ERR_PTR on failure.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		/* drop the idev reference taken above; rt would have
		 * owned it on success
		 */
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	/* may wrap the dst in an xfrm bundle (IPsec) */
	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}
2757
Daniel Lezcano569d3642008-01-18 03:56:57 -08002758static int ip6_dst_gc(struct dst_ops *ops)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759{
Alexey Dobriyan86393e52009-08-29 01:34:49 +00002760 struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
Daniel Lezcano7019b782008-03-04 13:50:14 -08002761 int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
2762 int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
2763 int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
2764 int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
2765 unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
Eric Dumazetfc66f952010-10-08 06:37:34 +00002766 int entries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Eric Dumazetfc66f952010-10-08 06:37:34 +00002768 entries = dst_entries_get_fast(ops);
Michal Kubeček49a18d82013-08-01 10:04:24 +02002769 if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
Eric Dumazetfc66f952010-10-08 06:37:34 +00002770 entries <= rt_max_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 goto out;
2772
Benjamin Thery6891a342008-03-04 13:49:47 -08002773 net->ipv6.ip6_rt_gc_expire++;
Li RongQing14956642014-05-19 17:30:28 +08002774 fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
Eric Dumazetfc66f952010-10-08 06:37:34 +00002775 entries = dst_entries_get_slow(ops);
2776 if (entries < ops->gc_thresh)
Daniel Lezcano7019b782008-03-04 13:50:14 -08002777 net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778out:
Daniel Lezcano7019b782008-03-04 13:50:14 -08002779 net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
Eric Dumazetfc66f952010-10-08 06:37:34 +00002780 return entries > rt_max_size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781}
2782
David Ahern8c145862016-04-24 21:26:04 -07002783static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2784 struct fib6_config *cfg,
David Ahernf4797b32018-01-25 16:55:08 -08002785 const struct in6_addr *gw_addr,
2786 u32 tbid, int flags)
David Ahern8c145862016-04-24 21:26:04 -07002787{
2788 struct flowi6 fl6 = {
2789 .flowi6_oif = cfg->fc_ifindex,
2790 .daddr = *gw_addr,
2791 .saddr = cfg->fc_prefsrc,
2792 };
2793 struct fib6_table *table;
2794 struct rt6_info *rt;
David Ahern8c145862016-04-24 21:26:04 -07002795
David Ahernf4797b32018-01-25 16:55:08 -08002796 table = fib6_get_table(net, tbid);
David Ahern8c145862016-04-24 21:26:04 -07002797 if (!table)
2798 return NULL;
2799
2800 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2801 flags |= RT6_LOOKUP_F_HAS_SADDR;
2802
David Ahernf4797b32018-01-25 16:55:08 -08002803 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
David Ahernb75cc8f2018-03-02 08:32:17 -08002804 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
David Ahern8c145862016-04-24 21:26:04 -07002805
2806 /* if table lookup failed, fall back to full lookup */
2807 if (rt == net->ipv6.ip6_null_entry) {
2808 ip6_rt_put(rt);
2809 rt = NULL;
2810 }
2811
2812 return rt;
2813}
2814
/* Validate an RTNH_F_ONLINK nexthop: in @dev's FIB table, the
 * gateway must not resolve to a local/anycast/reject route or to a
 * route egressing a different device.  A match on the default route
 * is ignored.  Returns 0 when acceptable, -EINVAL otherwise.
 */
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
	struct fib6_info *from;
	struct rt6_info *grt;
	int err;

	err = 0;
	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
	if (grt) {
		/* grt->from is RCU-protected; hold the read lock
		 * while inspecting it
		 */
		rcu_read_lock();
		from = rcu_dereference(grt->from);
		if (!grt->dst.error &&
		    /* ignore match if it is the default route */
		    from && !ipv6_addr_any(&from->fib6_dst.addr) &&
		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop has invalid gateway or device mismatch");
			err = -EINVAL;
		}
		rcu_read_unlock();

		/* drop the reference taken by the lookup */
		ip6_rt_put(grt);
	}

	return err;
}
2847
/* Resolve and validate a (non-onlink) gateway nexthop.  Tries the
 * route's own table first, then a full rt6_lookup().  On success may
 * fill in *_dev/*idev from the resolved route (taking references the
 * caller then owns).  Returns 0 when the gateway resolves to a
 * non-gateway route on an acceptable device, -EHOSTUNREACH otherwise.
 */
static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	struct rt6_info *grt = NULL;
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		int flags = RT6_LOOKUP_F_IFACE;

		/* restrict the first attempt to the configured table */
		grt = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags);
		if (grt) {
			/* reject recursive gateways and device mismatches */
			if (grt->rt6i_flags & RTF_GATEWAY ||
			    (dev && dev != grt->dst.dev)) {
				ip6_rt_put(grt);
				grt = NULL;
			}
		}
	}

	/* fall back to a full lookup across tables */
	if (!grt)
		grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);

	if (!grt)
		goto out;

	if (dev) {
		if (dev != grt->dst.dev) {
			ip6_rt_put(grt);
			goto out;
		}
	} else {
		/* adopt device and idev from the resolved route;
		 * references taken here are owned by the caller
		 */
		*_dev = dev = grt->dst.dev;
		*idev = grt->rt6i_idev;
		dev_hold(dev);
		in6_dev_hold(grt->rt6i_idev);
	}

	if (!(grt->rt6i_flags & RTF_GATEWAY))
		err = 0;

	ip6_rt_put(grt);

out:
	return err;
}
2898
/* Validate the gateway of a new route described by @cfg and resolve
 * the egress device/idev into *_dev/*idev if not already set.  The
 * gateway must not be a local address, must be link-local unicast or
 * (for the exceptional cases below) unicast/IPv4-mapped, and the
 * egress device must exist and not be loopback.  Returns 0 on
 * success, -EINVAL (or the nexthop-check error) on failure.
 */
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	/* link-local gateways must match on the device as well */
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using not link-local
		 * addresses as nexthop address.
		 * Otherwise, router will not able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}
2971
David Ahern83c442512019-03-27 20:53:50 -07002972static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
2973{
2974 if ((flags & RTF_REJECT) ||
2975 (dev && (dev->flags & IFF_LOOPBACK) &&
2976 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2977 !(flags & RTF_LOCAL)))
2978 return true;
2979
2980 return false;
2981}
2982
/* Initialize @fib6_nh from the route request in @cfg: resolve the
 * egress device and gateway, validate device state, and set up any
 * lightweight-tunnel encap state.  Reject-type routes are pinned to
 * the loopback device.  On error all references taken here are
 * dropped and any encap state is released.  Returns 0 or a negative
 * errno (with extack set for userspace-visible failures).
 */
int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
		 struct fib6_config *cfg, gfp_t gfp_flags,
		 struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	int addr_type;
	int err;

	fib6_nh->fib_nh_family = AF_INET6;

	err = -ENODEV;
	if (cfg->fc_ifindex) {
		/* references dropped at out:/error unwind below */
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_flags & RTNH_F_ONLINK) {
		/* onlink requires an explicit, up device */
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}

		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
	}

	fib6_nh->fib_nh_weight = 1;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				/* swap the earlier references for
				 * loopback's
				 */
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		/* reject routes skip gateway/device-state checks */
		goto set_dev;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		/* may resolve dev/idev from the gateway lookup */
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
			goto out;

		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

	/* local/anycast routes stay usable without carrier */
	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;

	err = fib_nh_common_init(&fib6_nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, gfp_flags, extack);
	if (err)
		goto out;
set_dev:
	/* on success the device reference is kept by fib6_nh */
	fib6_nh->fib_nh_dev = dev;
	fib6_nh->fib_nh_oif = dev->ifindex;
	err = 0;
out:
	if (idev)
		in6_dev_put(idev);

	if (err) {
		/* undo fib_nh_common_init()'s lwtunnel state and the
		 * device reference
		 */
		lwtstate_put(fib6_nh->fib_nh_lws);
		fib6_nh->fib_nh_lws = NULL;
		if (dev)
			dev_put(dev);
	}

	return err;
}
3094
/* Release the resources held by @fib6_nh (device reference, lwtunnel
 * state) via the family-independent nexthop teardown.
 */
void fib6_nh_release(struct fib6_nh *fib6_nh)
{
	fib_nh_common_release(&fib6_nh->nh_common);
}
3099
/* Build (but do not insert) a fib6_info from the netlink/ioctl route
 * request in @cfg.  Validates the request, selects or creates the
 * FIB table, initializes metrics, expiry, addresses and the nexthop.
 * Returns the new fib6_info with one reference held by the caller,
 * or an ERR_PTR on failure.
 */
static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
					       gfp_t gfp_flags,
					       struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct fib6_info *rt = NULL;
	struct fib6_table *table;
	int err = -EINVAL;
	int addr_type;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif

	err = -ENOBUFS;
	/* without NLM_F_CREATE only look up an existing table, but
	 * fall back to creating one (with a warning) if it is missing
	 */
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	err = -ENOMEM;
	rt = fib6_info_alloc(gfp_flags);
	if (!rt)
		goto out;

	rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
					       extack);
	if (IS_ERR(rt->fib6_metrics)) {
		err = PTR_ERR(rt->fib6_metrics);
		/* Do not leave garbage there. */
		rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
		goto out;
	}

	if (cfg->fc_flags & RTF_ADDRCONF)
		rt->dst_nocount = true;

	if (cfg->fc_flags & RTF_EXPIRES)
		fib6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		fib6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->fib6_protocol = cfg->fc_protocol;

	rt->fib6_table = table;
	rt->fib6_metric = cfg->fc_metric;
	rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
	/* the gateway flag, if any, is carried by the nexthop */
	rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;

	ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->fib6_dst.plen = cfg->fc_dst_len;
	if (rt->fib6_dst.plen == 128)
		rt->dst_host = true;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->fib6_src.plen = cfg->fc_src_len;
#endif
	err = fib6_nh_init(net, &rt->fib6_nh, cfg, gfp_flags, extack);
	if (err)
		goto out;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
	if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh.fib_nh_dev, addr_type))
		rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		struct net_device *dev = fib6_info_nh_dev(rt);

		/* the preferred source must be an address on the
		 * egress device
		 */
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
		rt->fib6_prefsrc.plen = 128;
	} else
		rt->fib6_prefsrc.plen = 0;

	return rt;
out:
	/* fib6_info_release() tolerates rt == NULL */
	fib6_info_release(rt);
	return ERR_PTR(err);
}
3228
David Ahernacb54e32018-04-17 17:33:22 -07003229int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
David Ahern333c4302017-05-21 10:12:04 -06003230 struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003231{
David Ahern8d1c8022018-04-17 17:33:26 -07003232 struct fib6_info *rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003233 int err;
3234
David Ahernacb54e32018-04-17 17:33:22 -07003235 rt = ip6_route_info_create(cfg, gfp_flags, extack);
David Ahernd4ead6b2018-04-17 17:33:16 -07003236 if (IS_ERR(rt))
3237 return PTR_ERR(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003238
David Ahernd4ead6b2018-04-17 17:33:16 -07003239 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
David Ahern93531c62018-04-17 17:33:25 -07003240 fib6_info_release(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003241
Linus Torvalds1da177e2005-04-16 15:20:36 -07003242 return err;
3243}
3244
David Ahern8d1c8022018-04-17 17:33:26 -07003245static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003246{
David Ahernafb1d4b52018-04-17 17:33:11 -07003247 struct net *net = info->nl_net;
Thomas Grafc71099a2006-08-04 23:20:06 -07003248 struct fib6_table *table;
David Ahernafb1d4b52018-04-17 17:33:11 -07003249 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003250
David Ahern421842e2018-04-17 17:33:18 -07003251 if (rt == net->ipv6.fib6_null_entry) {
Gao feng6825a262012-09-19 19:25:34 +00003252 err = -ENOENT;
3253 goto out;
3254 }
Patrick McHardy6c813a72006-08-06 22:22:47 -07003255
David Ahern93c2fb22018-04-18 15:38:59 -07003256 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003257 spin_lock_bh(&table->tb6_lock);
Thomas Graf86872cb2006-08-22 00:01:08 -07003258 err = fib6_del(rt, info);
Wei Wang66f5d6c2017-10-06 12:06:10 -07003259 spin_unlock_bh(&table->tb6_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
Gao feng6825a262012-09-19 19:25:34 +00003261out:
David Ahern93531c62018-04-17 17:33:25 -07003262 fib6_info_release(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003263 return err;
3264}
3265
David Ahern8d1c8022018-04-17 17:33:26 -07003266int ip6_del_rt(struct net *net, struct fib6_info *rt)
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003267{
David Ahernafb1d4b52018-04-17 17:33:11 -07003268 struct nl_info info = { .nl_net = net };
3269
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08003270 return __ip6_del_rt(rt, &info);
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003271}
3272
/* Delete @rt and, when fc_delete_all_nh is set, all of its multipath
 * siblings, all under one table lock.  A single RTM_DELROUTE
 * notification covering every hop is preferred; if that skb can be
 * built, the per-route notifications from fib6_del() are suppressed
 * and the combined one is sent after the lock is dropped.  Consumes
 * the caller's reference on @rt.
 */
static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.fib6_null_entry)
		goto out_put;
	table = rt->fib6_table;
	spin_lock_bh(&table->tb6_lock);

	if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
		struct fib6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt, NULL,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				/* fill failed: fall back to per-route
				 * notifications
				 */
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings,
					 fib6_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	spin_unlock_bh(&table->tb6_lock);
out_put:
	fib6_info_release(rt);

	if (skb) {
		/* send the combined notification outside the lock */
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}
3324
David Ahern23fb93a2018-04-17 17:33:23 -07003325static int ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
3326{
3327 int rc = -ESRCH;
3328
3329 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3330 goto out;
3331
3332 if (cfg->fc_flags & RTF_GATEWAY &&
3333 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3334 goto out;
Xin Long761f6022018-11-14 00:48:28 +08003335
3336 rc = rt6_remove_exception_rt(rt);
David Ahern23fb93a2018-04-17 17:33:23 -07003337out:
3338 return rc;
3339}
3340
/* Delete a route described by @cfg from its FIB table.
 *
 * With RTF_CACHE set, only a matching entry in the exception (cached
 * route) table of each candidate fib6_info is removed; otherwise the
 * fib6_info itself (one hop, or all ECMP siblings) is deleted.
 *
 * Returns 0 on success, -ESRCH if no matching route exists, or the
 * error from the underlying delete.
 */
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
{
	struct rt6_info *rt_cache;
	struct fib6_table *table;
	struct fib6_info *rt;
	struct fib6_node *fn;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}

	rcu_read_lock();

	/* for RTF_CACHE the exact-match leaf is not required: the
	 * exception may hang off a less specific parent route
	 */
	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len,
			 !(cfg->fc_flags & RTF_CACHE));

	if (fn) {
		for_each_fib6_node_rt_rcu(fn) {
			struct fib6_nh *nh;

			if (cfg->fc_flags & RTF_CACHE) {
				struct fib6_result res = {
					.f6i = rt,
				};
				int rc;

				rt_cache = rt6_find_cached_rt(&res,
							      &cfg->fc_dst,
							      &cfg->fc_src);
				if (rt_cache) {
					rc = ip6_del_cached_rt(rt_cache, cfg);
					/* -ESRCH means "not this one";
					 * keep scanning siblings
					 */
					if (rc != -ESRCH) {
						rcu_read_unlock();
						return rc;
					}
				}
				continue;
			}

			nh = &rt->fib6_nh;
			if (cfg->fc_ifindex &&
			    (!nh->fib_nh_dev ||
			     nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
				continue;
			/* take a reference before dropping RCU; a dying
			 * entry (refcount already zero) is skipped
			 */
			if (!fib6_info_hold_safe(rt))
				continue;
			rcu_read_unlock();

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	rcu_read_unlock();

	return err;
}
3413
/* Process an ICMPv6 Redirect received for @dst (dst_ops->redirect hook).
 *
 * Validates the redirect per RFC 4861 section 8 (length, non-multicast
 * destination, link-local unicast target unless on-link), updates the
 * neighbour cache for the new first hop, and installs a cached route
 * (exception entry) steering @msg->dest via the redirect target.
 */
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct fib6_result res = {};
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	/* dest == target means the destination itself is on-link */
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	/* routers don't honour redirects; admin may disable acceptance */
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	/* rt->from may be unlinked concurrently; RCU keeps it alive
	 * while the exception route is built and inserted
	 */
	rcu_read_lock();
	res.f6i = rcu_dereference(rt->from);
	if (!res.f6i)
		goto out;

	res.nh = &res.f6i->fib6_nh;
	res.fib6_flags = res.f6i->fib6_flags;
	res.fib6_type = res.f6i->fib6_type;
	nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	/* rt6_insert_exception() will take care of duplicated exceptions */
	if (rt6_insert_exception(nrt, &res)) {
		dst_release_immediate(&nrt->dst);
		goto out;
	}

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
	rcu_read_unlock();
	neigh_release(neigh);
}
3537
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003538#ifdef CONFIG_IPV6_ROUTE_INFO
/* Look up an RA route-information route (RFC 4191) for @prefix/@prefixlen
 * learned via @gwaddr on @dev.
 *
 * Returns the matching fib6_info with a reference held (caller must
 * release), or NULL if no such route exists.
 */
static struct fib6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct fib6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	/* iterator binds 'rt' to each route on the node in turn */
	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.fib_nh_dev->ifindex != ifindex)
			continue;
		if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
		    !rt->fib6_nh.fib_nh_gw_family)
			continue;
		if (!ipv6_addr_equal(&rt->fib6_nh.fib_nh_gw6, gwaddr))
			continue;
		/* skip entries whose refcount already dropped to zero */
		if (!fib6_info_hold_safe(rt))
			continue;
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}
3575
David Ahern8d1c8022018-04-17 17:33:26 -07003576static struct fib6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +00003577 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -07003578 const struct in6_addr *gwaddr,
3579 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00003580 unsigned int pref)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003581{
Thomas Graf86872cb2006-08-22 00:01:08 -07003582 struct fib6_config cfg = {
Rami Rosen238fc7e2008-02-09 23:43:11 -08003583 .fc_metric = IP6_RT_PRIO_USER,
David Ahern830218c2016-10-24 10:52:35 -07003584 .fc_ifindex = dev->ifindex,
Thomas Graf86872cb2006-08-22 00:01:08 -07003585 .fc_dst_len = prefixlen,
3586 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3587 RTF_UP | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08003588 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07003589 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003590 .fc_nlinfo.portid = 0,
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -08003591 .fc_nlinfo.nlh = NULL,
3592 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07003593 };
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003594
David Ahern830218c2016-10-24 10:52:35 -07003595 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003596 cfg.fc_dst = *prefix;
3597 cfg.fc_gateway = *gwaddr;
Thomas Graf86872cb2006-08-22 00:01:08 -07003598
YOSHIFUJI Hideakie317da92006-03-20 17:06:42 -08003599 /* We should treat it as a default route if prefix length is 0. */
3600 if (!prefixlen)
Thomas Graf86872cb2006-08-22 00:01:08 -07003601 cfg.fc_flags |= RTF_DEFAULT;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003602
David Ahernacb54e32018-04-17 17:33:22 -07003603 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003604
David Ahern830218c2016-10-24 10:52:35 -07003605 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003606}
3607#endif
3608
/* Find the RA-learned default route via gateway @addr on @dev.
 *
 * Returns the fib6_info with a reference held (caller releases), or
 * NULL if absent or already being freed.
 */
struct fib6_info *rt6_get_dflt_router(struct net *net,
				     const struct in6_addr *addr,
				     struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct fib6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	/* iterator binds 'rt' to each route hanging off the table root */
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct fib6_nh *nh = &rt->fib6_nh;

		/* both ADDRCONF and DEFAULT must be set (RA default route) */
		if (dev == nh->fib_nh_dev &&
		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
			break;
	}
	/* entry may be dying; treat an unobtainable reference as no match */
	if (rt && !fib6_info_hold_safe(rt))
		rt = NULL;
	rcu_read_unlock();
	return rt;
}
3635
/* Install an RA-learned default route via @gwaddr on @dev with router
 * preference @pref, and flag the table as holding a default router.
 *
 * Returns the route as found by rt6_get_dflt_router() (reference held),
 * or NULL if the add failed.
 */
struct fib6_info *rt6_add_dflt_router(struct net *net,
				     const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric = IP6_RT_PRIO_USER,
		.fc_ifindex = dev->ifindex,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
			    RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_type = RTN_UNICAST,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_gateway = *gwaddr;

	if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
		struct fib6_table *table;

		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		/* lets rt6_purge_dflt_routers() skip untouched tables */
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(net, gwaddr, dev);
}
3666
/* Remove all RA-learned default routes from @table, except on devices
 * configured with accept_ra == 2 (accept RA even when forwarding).
 *
 * Deletion cannot happen under rcu_read_lock(), so each hit drops RCU,
 * deletes, and restarts the walk from the top.
 */
static void __rt6_purge_dflt_routers(struct net *net,
				     struct fib6_table *table)
{
	struct fib6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;

		/* hold_safe guards against an entry already being freed */
		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!idev || idev->cnf.accept_ra != 2) &&
		    fib6_info_hold_safe(rt)) {
			rcu_read_unlock();
			ip6_del_rt(net, rt);
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}
3690
/* Purge RA-learned default routers from every FIB table in @net that
 * is flagged as containing one (RT6_TABLE_HAS_DFLT_ROUTER).
 */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(net, table);
		}
	}

	rcu_read_unlock();
}
3709
/* Translate a legacy ioctl in6_rtmsg into a fib6_config.
 *
 * The compound-literal assignment zero-fills every field not listed,
 * so @cfg needs no prior initialization.
 */
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
{
	*cfg = (struct fib6_config){
		.fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
			 : RT6_TABLE_MAIN,
		.fc_ifindex = rtmsg->rtmsg_ifindex,
		/* metric 0 from userspace means "use the default" */
		.fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
		.fc_expires = rtmsg->rtmsg_info,
		.fc_dst_len = rtmsg->rtmsg_dst_len,
		.fc_src_len = rtmsg->rtmsg_src_len,
		.fc_flags = rtmsg->rtmsg_flags,
		.fc_type = rtmsg->rtmsg_type,

		.fc_nlinfo.nl_net = net,

		.fc_dst = rtmsg->rtmsg_dst,
		.fc_src = rtmsg->rtmsg_src,
		.fc_gateway = rtmsg->rtmsg_gateway,
	};
}
3732
/* Legacy SIOCADDRT/SIOCDELRT ioctl entry point for IPv6 routes.
 *
 * Requires CAP_NET_ADMIN in the owning user namespace. Copies the
 * in6_rtmsg from userspace, converts it to a fib6_config, and performs
 * the add/delete under the RTNL lock.
 *
 * Returns 0 on success, -EPERM/-EFAULT/-EINVAL or the routing error.
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		/* copy_from_user() returns bytes NOT copied; any
		 * nonzero result maps to -EFAULT
		 */
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}
3769
3770/*
3771 * Drop the packet on the floor
3772 */
3773
/* Drop @skb with an ICMPv6 destination-unreachable of the given @code,
 * bumping the matching SNMP counter (@ipstats_mib_noroutes selects the
 * in/out no-route MIB). Always returns 0 (packet consumed).
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(dst->dev);
	struct inet6_dev *idev;
	int type;

	/* for l3mdev (VRF) the dst points at loopback; attribute the
	 * stats to the original ingress device instead
	 */
	if (netif_is_l3_master(skb->dev) &&
	    dst->dev == net->loopback_dev)
		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
	else
		idev = ip6_dst_idev(dst);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		/* unspecified destination counts as an address error */
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
		break;
	}

	/* Start over by dropping the dst for l3mdev case */
	if (netif_is_l3_master(skb->dev))
		skb_dst_drop(skb);

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}
3808
/* dst input handler for blackhole/unreachable routes: drop with
 * ICMPv6 "no route" on the input path.
 */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}
3813
/* dst output handler for blackhole/unreachable routes: drop with
 * ICMPv6 "no route" on the output path.
 */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* point skb->dev at the route's device before icmpv6_send() */
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
3819
/* dst input handler for prohibit routes: drop with ICMPv6
 * "administratively prohibited" on the input path.
 */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}
3824
/* dst output handler for prohibit routes: drop with ICMPv6
 * "administratively prohibited" on the output path.
 */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* point skb->dev at the route's device before icmpv6_send() */
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
3830
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831/*
3832 * Allocate a dst for local (unicast / anycast) address.
3833 */
3834
/* Build (but do not insert) a host route for a local unicast or
 * anycast address @addr on @idev's device.
 *
 * Returns the new fib6_info from ip6_route_info_create(), or an
 * ERR_PTR on failure.
 */
struct fib6_info *addrconf_f6i_alloc(struct net *net,
				     struct inet6_dev *idev,
				     const struct in6_addr *addr,
				     bool anycast, gfp_t gfp_flags)
{
	struct fib6_config cfg = {
		.fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
		.fc_ifindex = idev->dev->ifindex,
		.fc_flags = RTF_UP | RTF_ADDRCONF | RTF_NONEXTHOP,
		.fc_dst = *addr,
		.fc_dst_len = 128,
		.fc_protocol = RTPROT_KERNEL,
		.fc_nlinfo.nl_net = net,
		/* local routes must survive the device being down */
		.fc_ignore_dev_down = true,
	};

	if (anycast) {
		cfg.fc_type = RTN_ANYCAST;
		cfg.fc_flags |= RTF_ANYCAST;
	} else {
		cfg.fc_type = RTN_LOCAL;
		cfg.fc_flags |= RTF_LOCAL;
	}

	return ip6_route_info_create(&cfg, gfp_flags, NULL);
}
3861
/* remove deleted ip from prefsrc entries */
/* callback argument for fib6_remove_prefsrc(): the device/netns scope
 * and the address being removed
 */
struct arg_dev_net_ip {
	struct net_device *dev;
	struct net *net;
	struct in6_addr *addr;
};
3868
/* fib6_clean_all() callback: clear the preferred-source address on any
 * route that still references a just-deleted local address.
 *
 * @arg is a struct arg_dev_net_ip. Always returns 0 (never deletes the
 * route itself, only zeroes its prefsrc length).
 */
static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	/* NULL dev matches routes on any device */
	if (((void *)rt->fib6_nh.fib_nh_dev == dev || !dev) &&
	    rt != net->ipv6.fib6_null_entry &&
	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
		spin_lock_bh(&rt6_exception_lock);
		/* remove prefsrc entry */
		rt->fib6_prefsrc.plen = 0;
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}
3885
/* Walk all FIB tables and clear prefsrc references to @ifp's address
 * (called when the address is removed from its device).
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
3896
David Ahern2b2450c2019-03-27 20:53:52 -07003897#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT)
Duan Jiongbe7a0102014-05-15 15:56:14 +08003898
3899/* Remove routers and update dst entries when gateway turn into host. */
/* fib6_clean_all() callback used when @arg (a gateway address) stops
 * being a router: returning -1 tells the walker to delete RA-learned
 * routes through that gateway; matching cached (exception) routes are
 * scrubbed as a side effect.
 */
static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;

	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    rt->fib6_nh.fib_nh_gw_family &&
	    ipv6_addr_equal(gateway, &rt->fib6_nh.fib_nh_gw6)) {
		return -1;
	}

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	rt6_exceptions_clean_tohost(rt, gateway);

	return 0;
}
3918
/* Remove routes (and cached exceptions) that use @gateway as next hop,
 * across all FIB tables in @net.
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
3923
/* argument for netdev-event fib walkers: the affected device plus
 * either nexthop flags to set/clear or the netdev event code
 * (the union reflects that a given walker uses only one of the two)
 */
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned char nh_flags;
		unsigned long event;
	};
};
3931
/* Find the first route on @rt's fib6 node that belongs to the same
 * ECMP group (same metric and ECMP-qualifying).
 *
 * Must be called with the table lock held (enforced via
 * lockdep_is_held() on every dereference). Returns NULL if none.
 */
static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference_protected(rt->fib6_node,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	iter = rcu_dereference_protected(fn->leaf,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	while (iter) {
		if (iter->fib6_metric == rt->fib6_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->fib6_next,
				lockdep_is_held(&rt->fib6_table->tb6_lock));
	}

	return NULL;
}
3951
David Ahern8d1c8022018-04-17 17:33:26 -07003952static bool rt6_is_dead(const struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003953{
David Ahernad1601a2019-03-27 20:53:56 -07003954 if (rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD ||
3955 (rt->fib6_nh.fib_nh_flags & RTNH_F_LINKDOWN &&
3956 ip6_ignore_linkdown(rt->fib6_nh.fib_nh_dev)))
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003957 return true;
3958
3959 return false;
3960}
3961
/* Sum the nexthop weights of @rt and all of its ECMP siblings,
 * counting only nexthops that are not dead (see rt6_is_dead()).
 */
static int rt6_multipath_total_weight(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	int total = 0;

	if (!rt6_is_dead(rt))
		total += rt->fib6_nh.fib_nh_weight;

	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
		if (!rt6_is_dead(iter))
			total += iter->fib6_nh.fib_nh_weight;
	}

	return total;
}
3977
/* Compute and store the hash upper bound for one nexthop.
 *
 * @weight accumulates the cumulative weight of the live nexthops seen so
 * far; @total is the sum over all live nexthops (rt6_multipath_total_weight).
 * The bound is the cumulative weight scaled into a 31-bit fixed-point
 * range, so a multipath hash below it selects this nexthop.  Dead
 * nexthops get -1 and are never selected.
 */
static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
{
	int upper_bound = -1;

	if (!rt6_is_dead(rt)) {
		*weight += rt->fib6_nh.fib_nh_weight;
		/* round-to-nearest of (cumulative_weight / total) << 31 */
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
						    total) - 1;
	}
	/* atomic_set: read concurrently by the datapath hash selection —
	 * NOTE(review): presumed; confirm against fib6_select_path()
	 */
	atomic_set(&rt->fib6_nh.fib_nh_upper_bound, upper_bound);
}
3989
David Ahern8d1c8022018-04-17 17:33:26 -07003990static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003991{
David Ahern8d1c8022018-04-17 17:33:26 -07003992 struct fib6_info *iter;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003993 int weight = 0;
3994
3995 rt6_upper_bound_set(rt, &weight, total);
3996
David Ahern93c2fb22018-04-18 15:38:59 -07003997 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003998 rt6_upper_bound_set(iter, &weight, total);
3999}
4000
/* Recompute the hash upper bounds of an ECMP route after a nexthop's
 * liveness or weight changed, so traffic is redistributed over the
 * remaining usable nexthops.  No-op for non-multipath routes.
 */
void rt6_multipath_rebalance(struct fib6_info *rt)
{
	struct fib6_info *first;
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->fib6_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}
4024
/* fib6_clean_all() callback invoked when a device comes (back) up:
 * clear the requested nexthop flags (arg->nh_flags) on every route
 * using that device, bump the sernum so cached dsts are revalidated,
 * and rebalance any affected multipath route.  Always returns 0
 * (never asks the walker to delete the route).
 */
static int fib6_ifup(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	struct net *net = dev_net(arg->dev);

	if (rt != net->ipv6.fib6_null_entry &&
	    rt->fib6_nh.fib_nh_dev == arg->dev) {
		rt->fib6_nh.fib_nh_flags &= ~arg->nh_flags;
		fib6_update_sernum_upto_root(net, rt);
		rt6_multipath_rebalance(rt);
	}

	return 0;
}
4039
/* Device (or its carrier) came up: clear @nh_flags on all routes that
 * use @dev.  When reviving DEAD nexthops and the carrier is actually
 * up, also clear LINKDOWN in the same pass.
 */
void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		/* positional init of the anonymous union member */
		{
			.nh_flags = nh_flags,
		},
	};

	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}
4054
David Ahern8d1c8022018-04-17 17:33:26 -07004055static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02004056 const struct net_device *dev)
4057{
David Ahern8d1c8022018-04-17 17:33:26 -07004058 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004059
David Ahernad1601a2019-03-27 20:53:56 -07004060 if (rt->fib6_nh.fib_nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004061 return true;
David Ahern93c2fb22018-04-18 15:38:59 -07004062 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahernad1601a2019-03-27 20:53:56 -07004063 if (iter->fib6_nh.fib_nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004064 return true;
4065
4066 return false;
4067}
4068
David Ahern8d1c8022018-04-17 17:33:26 -07004069static void rt6_multipath_flush(struct fib6_info *rt)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004070{
David Ahern8d1c8022018-04-17 17:33:26 -07004071 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004072
4073 rt->should_flush = 1;
David Ahern93c2fb22018-04-18 15:38:59 -07004074 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004075 iter->should_flush = 1;
4076}
4077
David Ahern8d1c8022018-04-17 17:33:26 -07004078static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02004079 const struct net_device *down_dev)
4080{
David Ahern8d1c8022018-04-17 17:33:26 -07004081 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004082 unsigned int dead = 0;
4083
David Ahernad1601a2019-03-27 20:53:56 -07004084 if (rt->fib6_nh.fib_nh_dev == down_dev ||
4085 rt->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004086 dead++;
David Ahern93c2fb22018-04-18 15:38:59 -07004087 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahernad1601a2019-03-27 20:53:56 -07004088 if (iter->fib6_nh.fib_nh_dev == down_dev ||
4089 iter->fib6_nh.fib_nh_flags & RTNH_F_DEAD)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004090 dead++;
4091
4092 return dead;
4093}
4094
David Ahern8d1c8022018-04-17 17:33:26 -07004095static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02004096 const struct net_device *dev,
David Ahernecc56632019-04-23 08:48:09 -07004097 unsigned char nh_flags)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004098{
David Ahern8d1c8022018-04-17 17:33:26 -07004099 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004100
David Ahernad1601a2019-03-27 20:53:56 -07004101 if (rt->fib6_nh.fib_nh_dev == dev)
4102 rt->fib6_nh.fib_nh_flags |= nh_flags;
David Ahern93c2fb22018-04-18 15:38:59 -07004103 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahernad1601a2019-03-27 20:53:56 -07004104 if (iter->fib6_nh.fib_nh_dev == dev)
4105 iter->fib6_nh.fib_nh_flags |= nh_flags;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004106}
4107
/* called with write lock held for table with rt */
/* fib6_clean_all() callback for device-down/unregister events.
 *
 * Return value is interpreted by the fib walker: 0 keeps the route,
 * negative values request deletion.  NOTE(review): -1 vs -2 distinguish
 * plain deletion from the multipath-aware variant — confirm the exact
 * semantics against fib6_clean_node().
 */
static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
	struct net *net = dev_net(dev);

	/* the null entry is never tied to a real device */
	if (rt == net->ipv6.fib6_null_entry)
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
		/* device is going away entirely: drop every route on it */
		return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
	case NETDEV_DOWN:
		if (rt->should_flush)
			return -1;
		if (!rt->fib6_nsiblings)
			/* single-nexthop route: delete if it uses dev */
			return rt->fib6_nh.fib_nh_dev == dev ? -1 : 0;
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
			/* all nexthops dead -> flush the whole ECMP group */
			if (rt->fib6_nsiblings + 1 == count) {
				rt6_multipath_flush(rt);
				return -1;
			}
			/* otherwise only disable the affected nexthops */
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
			fib6_update_sernum(net, rt);
			rt6_multipath_rebalance(rt);
		}
		return -2;
	case NETDEV_CHANGE:
		/* carrier change: mark link-down, but leave local and
		 * anycast routes untouched
		 */
		if (rt->fib6_nh.fib_nh_dev != dev ||
		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
			break;
		rt->fib6_nh.fib_nh_flags |= RTNH_F_LINKDOWN;
		rt6_multipath_rebalance(rt);
		break;
	}

	return 0;
}
4151
/* Propagate a device down/unregister @event to all fib6 tables in the
 * device's namespace via fib6_ifdown().  The skip_notify sysctl lets
 * administrators suppress the per-route delete notifications on
 * device-down storms.
 */
void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		/* positional init of the anonymous union member */
		{
			.event = event,
		},
	};
	struct net *net = dev_net(dev);

	if (net->ipv6.sysctl.skip_notify_on_dev_down)
		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
	else
		fib6_clean_all(net, fib6_ifdown, &arg);
}
4167
/* Full IPv6 teardown for a device: sync the routing tables, drop
 * uncached (dst-only) routes referencing the device, and purge its
 * neighbour entries.
 */
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}
4174
/* Argument bundle for rt6_mtu_change_route(): the device whose MTU
 * changed and its new MTU value.
 */
struct rt6_mtu_change_arg {
	struct net_device *dev;
	unsigned int mtu;	/* new device MTU */
};
4179
/* fib6_clean_all() callback: update the RTAX_MTU metric and any cached
 * PMTU exceptions of routes on @arg->dev after a device MTU change.
 * Always returns 0 (never deletes the route).
 */
static int rt6_mtu_change_route(struct fib6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase
	   update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	if (rt->fib6_nh.fib_nh_dev == arg->dev &&
	    !fib6_metric_locked(rt, RTAX_MTU)) {
		u32 mtu = rt->fib6_pmtu;

		/* adopt the new MTU when shrinking, or when growing and
		 * the route was simply tracking the old device MTU
		 */
		if (mtu >= arg->mtu ||
		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
			fib6_metric_set(rt, RTAX_MTU, arg->mtu);

		/* exception entries carry their own pmtu; fix them up
		 * under the exception lock
		 */
		spin_lock_bh(&rt6_exception_lock);
		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}
4214
/* Entry point for device MTU changes: walk every fib6 table in the
 * device's namespace and update affected routes.
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}
4224
/* Netlink attribute validation policy for RTM_{NEW,DEL,GET}ROUTE on
 * IPv6.  Attributes not listed here are accepted without type/length
 * checking by nlmsg_parse_deprecated().
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
	[RTA_TABLE]		= { .type = NLA_U32 },
	[RTA_IP_PROTO]		= { .type = NLA_U8 },
	[RTA_SPORT]		= { .type = NLA_U16 },
	[RTA_DPORT]		= { .type = NLA_U16 },
};
4244
/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * struct fib6_config.
 *
 * Parses and validates the rtmsg header plus attributes against
 * rtm_ipv6_policy.  Returns 0 on success, negative errno on malformed
 * input.  Note: fc_mx/fc_mp point into the skb-owned attribute data,
 * not copies — the config must not outlive the message.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
				     rtm_ipv6_policy, extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);

	/* zero-initializes all fields not named here */
	*cfg = (struct fib6_config){
		.fc_table = rtm->rtm_table,
		.fc_dst_len = rtm->rtm_dst_len,
		.fc_src_len = rtm->rtm_src_len,
		.fc_flags = RTF_UP,
		.fc_protocol = rtm->rtm_protocol,
		.fc_type = rtm->rtm_type,

		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
		.fc_nlinfo.nlh = nlh,
		.fc_nlinfo.nl_net = sock_net(skb->sk),
	};

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}
	/* RTA_VIA (cross-family gateway) is an IPv4/MPLS concept */
	if (tb[RTA_VIA]) {
		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
		goto errout;
	}

	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		/* borrowed pointer into the message, see function comment */
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	/* explicit RTA_TABLE overrides the 8-bit rtm_table field */
	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* clamp unknown preferences to MEDIUM (RFC 4191) */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		/* infinite timeouts simply leave RTF_EXPIRES unset */
		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}
4375
/* One pending nexthop parsed from an RTA_MULTIPATH request; collected
 * on a local list by ip6_route_multipath_add() until all nexthops have
 * been created, then inserted in one pass.
 */
struct rt6_nh {
	struct fib6_info *fib6_info;	/* route built for this nexthop */
	struct fib6_config r_cfg;	/* per-nexthop config (gateway, oif, ...) */
	struct list_head next;
};
4381
David Ahernd4ead6b2018-04-17 17:33:16 -07004382static int ip6_route_info_append(struct net *net,
4383 struct list_head *rt6_nh_list,
David Ahern8d1c8022018-04-17 17:33:26 -07004384 struct fib6_info *rt,
4385 struct fib6_config *r_cfg)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004386{
4387 struct rt6_nh *nh;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004388 int err = -EEXIST;
4389
4390 list_for_each_entry(nh, rt6_nh_list, next) {
David Ahern8d1c8022018-04-17 17:33:26 -07004391 /* check if fib6_info already exists */
4392 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004393 return err;
4394 }
4395
4396 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4397 if (!nh)
4398 return -ENOMEM;
David Ahern8d1c8022018-04-17 17:33:26 -07004399 nh->fib6_info = rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004400 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4401 list_add_tail(&nh->next, rt6_nh_list);
4402
4403 return 0;
4404}
4405
/* Send a single RTM_NEWROUTE notification covering a whole multipath
 * insert.  @rt is the first route inserted, @rt_last the last; either
 * may be NULL if nothing was inserted.
 */
static void ip6_route_mpath_notify(struct fib6_info *rt,
				   struct fib6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
		rt = list_first_entry(&rt_last->fib6_siblings,
				      struct fib6_info,
				      fib6_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}
4426
/* Install a multipath (ECMP) route from an RTM_NEWROUTE request whose
 * RTA_MULTIPATH attribute carries one rtnexthop per path.
 *
 * Two-phase: first every nexthop is parsed and a fib6_info created for
 * it (collected on rt6_nh_list); only then are they inserted one by
 * one.  On a mid-insert failure the already-inserted routes are
 * deleted again so the request is all-or-nothing, and a notification
 * is sent for what was briefly added so listeners stay coherent.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct fib6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * fib6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		/* start each nexthop from the common route config */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}
		if (!rt6_qualify_for_ecmp(rt)) {
			err = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "Device only routes can not be added for IPv6 using the multipath API.");
			fib6_info_release(rt);
			goto cleanup;
		}

		/* rtnh_hops is weight - 1 on the wire */
		rt->fib6_nh.fib_nh_weight = rtnh->rtnh_hops + 1;

		err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
					    rt, &r_cfg);
		if (err) {
			fib6_info_release(rt);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		err = __ip6_ins_rt(nh->fib6_info, info, extack);
		/* drop our creation reference; the tree holds its own */
		fib6_info_release(nh->fib6_info);

		if (!err) {
			/* save reference to last route successfully inserted */
			rt_last = nh->fib6_info;

			/* save reference to first route for notification */
			if (!rt_notif)
				rt_notif = nh->fib6_info;
		}

		/* nh->fib6_info is used or freed at this point, reset to NULL*/
		nh->fib6_info = NULL;
		if (err) {
			if (replace && nhn)
				NL_SET_ERR_MSG_MOD(extack,
						   "multipath route replace failed (check consistency of installed routes)");
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	/* release any nexthops that were never inserted */
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->fib6_info)
			fib6_info_release(nh->fib6_info);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}
4574
David Ahern333c4302017-05-21 10:12:04 -06004575static int ip6_route_multipath_del(struct fib6_config *cfg,
4576 struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004577{
4578 struct fib6_config r_cfg;
4579 struct rtnexthop *rtnh;
4580 int remaining;
4581 int attrlen;
4582 int err = 1, last_err = 0;
4583
4584 remaining = cfg->fc_mp_len;
4585 rtnh = (struct rtnexthop *)cfg->fc_mp;
4586
4587 /* Parse a Multipath Entry */
4588 while (rtnh_ok(rtnh, remaining)) {
4589 memcpy(&r_cfg, cfg, sizeof(*cfg));
4590 if (rtnh->rtnh_ifindex)
4591 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4592
4593 attrlen = rtnh_attrlen(rtnh);
4594 if (attrlen > 0) {
4595 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4596
4597 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4598 if (nla) {
4599 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4600 r_cfg.fc_flags |= RTF_GATEWAY;
4601 }
4602 }
David Ahern333c4302017-05-21 10:12:04 -06004603 err = ip6_route_del(&r_cfg, extack);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004604 if (err)
4605 last_err = err;
4606
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004607 rtnh = rtnh_next(rtnh, &remaining);
4608 }
4609
4610 return last_err;
4611}
4612
David Ahernc21ef3e2017-04-16 09:48:24 -07004613static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4614 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004615{
Thomas Graf86872cb2006-08-22 00:01:08 -07004616 struct fib6_config cfg;
4617 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004618
David Ahern333c4302017-05-21 10:12:04 -06004619 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07004620 if (err < 0)
4621 return err;
4622
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004623 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06004624 return ip6_route_multipath_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08004625 else {
4626 cfg.fc_delete_all_nh = 1;
David Ahern333c4302017-05-21 10:12:04 -06004627 return ip6_route_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08004628 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004629}
4630
David Ahernc21ef3e2017-04-16 09:48:24 -07004631static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4632 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633{
Thomas Graf86872cb2006-08-22 00:01:08 -07004634 struct fib6_config cfg;
4635 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004636
David Ahern333c4302017-05-21 10:12:04 -06004637 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07004638 if (err < 0)
4639 return err;
4640
David Ahern67f69512019-03-21 05:21:34 -07004641 if (cfg.fc_metric == 0)
4642 cfg.fc_metric = IP6_RT_PRIO_USER;
4643
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004644 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06004645 return ip6_route_multipath_add(&cfg, extack);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004646 else
David Ahernacb54e32018-04-17 17:33:22 -07004647 return ip6_route_add(&cfg, GFP_KERNEL, extack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004648}
4649
David Ahern8d1c8022018-04-17 17:33:26 -07004650static size_t rt6_nlmsg_size(struct fib6_info *rt)
Thomas Graf339bf982006-11-10 14:10:15 -08004651{
David Ahernbeb1afac52017-02-02 12:37:09 -08004652 int nexthop_len = 0;
4653
David Ahern93c2fb22018-04-18 15:38:59 -07004654 if (rt->fib6_nsiblings) {
David Ahernbeb1afac52017-02-02 12:37:09 -08004655 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4656 + NLA_ALIGN(sizeof(struct rtnexthop))
4657 + nla_total_size(16) /* RTA_GATEWAY */
David Ahernad1601a2019-03-27 20:53:56 -07004658 + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws);
David Ahernbeb1afac52017-02-02 12:37:09 -08004659
David Ahern93c2fb22018-04-18 15:38:59 -07004660 nexthop_len *= rt->fib6_nsiblings;
David Ahernbeb1afac52017-02-02 12:37:09 -08004661 }
4662
Thomas Graf339bf982006-11-10 14:10:15 -08004663 return NLMSG_ALIGN(sizeof(struct rtmsg))
4664 + nla_total_size(16) /* RTA_SRC */
4665 + nla_total_size(16) /* RTA_DST */
4666 + nla_total_size(16) /* RTA_GATEWAY */
4667 + nla_total_size(16) /* RTA_PREFSRC */
4668 + nla_total_size(4) /* RTA_TABLE */
4669 + nla_total_size(4) /* RTA_IIF */
4670 + nla_total_size(4) /* RTA_OIF */
4671 + nla_total_size(4) /* RTA_PRIORITY */
Noriaki TAKAMIYA6a2b9ce2007-01-23 22:09:41 -08004672 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
Daniel Borkmannea697632015-01-05 23:57:47 +01004673 + nla_total_size(sizeof(struct rta_cacheinfo))
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004674 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004675 + nla_total_size(1) /* RTA_PREF */
David Ahernad1601a2019-03-27 20:53:56 -07004676 + lwtunnel_get_encap_size(rt->fib6_nh.fib_nh_lws)
David Ahernbeb1afac52017-02-02 12:37:09 -08004677 + nexthop_len;
4678}
4679
/* Build one RTM_NEWROUTE/RTM_DELROUTE message for FIB entry @rt and,
 * optionally, the dst_entry @dst a lookup produced.
 *
 * When @dst is non-NULL its rt6_info keys/flags describe the (possibly
 * cloned) route being reported; otherwise the FIB entry's own keys and
 * flags are used.  @dest/@src are the lookup addresses from an
 * RTM_GETROUTE request and force full /128 prefix lengths in the reply;
 * @iif is the input interface to report, if any.
 *
 * Returns 0 on success, -EMSGSIZE when the skb ran out of tailroom (the
 * partially built message is cancelled), or a negative error from
 * ip6mr_get_route().
 */
static int rt6_fill_node(struct net *net, struct sk_buff *skb,
			 struct fib6_info *rt, struct dst_entry *dst,
			 struct in6_addr *dest, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	struct rt6_info *rt6 = (struct rt6_info *)dst;
	struct rt6key *rt6_dst, *rt6_src;
	u32 *pmetrics, table, rt6_flags;
	struct nlmsghdr *nlh;
	struct rtmsg *rtm;
	long expires = 0;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	/* Prefer the cached route's keys/flags when a dst was supplied */
	if (rt6) {
		rt6_dst = &rt6->rt6i_dst;
		rt6_src = &rt6->rt6i_src;
		rt6_flags = rt6->rt6i_flags;
	} else {
		rt6_dst = &rt->fib6_dst;
		rt6_src = &rt->fib6_src;
		rt6_flags = rt->fib6_flags;
	}

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt6_dst->plen;
	rtm->rtm_src_len = rt6_src->plen;
	rtm->rtm_tos = 0;
	if (rt->fib6_table)
		table = rt->fib6_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	/* The 8-bit header field cannot hold large table ids; the full
	 * id always goes out in RTA_TABLE below.
	 */
	rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;

	rtm->rtm_type = rt->fib6_type;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->fib6_protocol;

	if (rt6_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	/* A lookup destination forces a host (/128) reply */
	if (dest) {
		if (nla_put_in6_addr(skb, RTA_DST, dest))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		/* Multicast destinations are resolved by the multicast
		 * routing code, which fills the message itself.
		 */
		if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dest) {
		/* Output lookup: report the source address that would
		 * be selected for this destination.
		 */
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->fib6_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->fib6_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	/* Cached dst carries live metrics; fall back to the FIB entry's */
	pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
	if (rtnetlink_put_metrics(skb, pmetrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt6) {
		if (rt6_flags & RTF_GATEWAY &&
		    nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
			goto nla_put_failure;

		if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
			goto nla_put_failure;
	} else if (rt->fib6_nsiblings) {
		struct fib6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (fib_add_nexthop(skb, &rt->fib6_nh.nh_common,
				    rt->fib6_nh.fib_nh_weight) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->fib6_siblings, fib6_siblings) {
			if (fib_add_nexthop(skb, &sibling->fib6_nh.nh_common,
					    sibling->fib6_nh.fib_nh_weight) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		unsigned char nh_flags = 0;

		if (fib_nexthop_info(skb, &rt->fib6_nh.nh_common,
				     &nh_flags, false) < 0)
			goto nla_put_failure;

		/* e.g. dead/offload bits reported in the header flags */
		rtm->rtm_flags |= nh_flags;
	}

	if (rt6_flags & RTF_EXPIRES) {
		expires = dst ? dst->expires : rt->expires;
		expires -= jiffies;
	}

	if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
		goto nla_put_failure;


	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
4837
David Ahern13e38902018-10-15 18:56:44 -07004838static bool fib6_info_uses_dev(const struct fib6_info *f6i,
4839 const struct net_device *dev)
4840{
David Ahernad1601a2019-03-27 20:53:56 -07004841 if (f6i->fib6_nh.fib_nh_dev == dev)
David Ahern13e38902018-10-15 18:56:44 -07004842 return true;
4843
4844 if (f6i->fib6_nsiblings) {
4845 struct fib6_info *sibling, *next_sibling;
4846
4847 list_for_each_entry_safe(sibling, next_sibling,
4848 &f6i->fib6_siblings, fib6_siblings) {
David Ahernad1601a2019-03-27 20:53:56 -07004849 if (sibling->fib6_nh.fib_nh_dev == dev)
David Ahern13e38902018-10-15 18:56:44 -07004850 return true;
4851 }
4852 }
4853
4854 return false;
4855}
4856
David Ahern8d1c8022018-04-17 17:33:26 -07004857int rt6_dump_route(struct fib6_info *rt, void *p_arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004858{
4859 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
David Ahern13e38902018-10-15 18:56:44 -07004860 struct fib_dump_filter *filter = &arg->filter;
4861 unsigned int flags = NLM_F_MULTI;
David Ahern1f17e2f2017-01-26 13:54:08 -08004862 struct net *net = arg->net;
4863
David Ahern421842e2018-04-17 17:33:18 -07004864 if (rt == net->ipv6.fib6_null_entry)
David Ahern1f17e2f2017-01-26 13:54:08 -08004865 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004866
David Ahern13e38902018-10-15 18:56:44 -07004867 if ((filter->flags & RTM_F_PREFIX) &&
4868 !(rt->fib6_flags & RTF_PREFIX_RT)) {
4869 /* success since this is not a prefix route */
4870 return 1;
4871 }
4872 if (filter->filter_set) {
4873 if ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
4874 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
4875 (filter->protocol && rt->fib6_protocol != filter->protocol)) {
David Ahernf8cfe2c2017-01-17 15:51:08 -08004876 return 1;
4877 }
David Ahern13e38902018-10-15 18:56:44 -07004878 flags |= NLM_F_DUMP_FILTERED;
David Ahernf8cfe2c2017-01-17 15:51:08 -08004879 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004880
David Ahernd4ead6b2018-04-17 17:33:16 -07004881 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
4882 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
David Ahern13e38902018-10-15 18:56:44 -07004883 arg->cb->nlh->nlmsg_seq, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004884}
4885
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08004886static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
4887 const struct nlmsghdr *nlh,
4888 struct nlattr **tb,
4889 struct netlink_ext_ack *extack)
4890{
4891 struct rtmsg *rtm;
4892 int i, err;
4893
4894 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
4895 NL_SET_ERR_MSG_MOD(extack,
4896 "Invalid header for get route request");
4897 return -EINVAL;
4898 }
4899
4900 if (!netlink_strict_get_check(skb))
Johannes Berg8cb08172019-04-26 14:07:28 +02004901 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
4902 rtm_ipv6_policy, extack);
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08004903
4904 rtm = nlmsg_data(nlh);
4905 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
4906 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
4907 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
4908 rtm->rtm_type) {
4909 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
4910 return -EINVAL;
4911 }
4912 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
4913 NL_SET_ERR_MSG_MOD(extack,
4914 "Invalid flags for get route request");
4915 return -EINVAL;
4916 }
4917
Johannes Berg8cb08172019-04-26 14:07:28 +02004918 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
4919 rtm_ipv6_policy, extack);
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08004920 if (err)
4921 return err;
4922
4923 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
4924 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
4925 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
4926 return -EINVAL;
4927 }
4928
4929 for (i = 0; i <= RTA_MAX; i++) {
4930 if (!tb[i])
4931 continue;
4932
4933 switch (i) {
4934 case RTA_SRC:
4935 case RTA_DST:
4936 case RTA_IIF:
4937 case RTA_OIF:
4938 case RTA_MARK:
4939 case RTA_UID:
4940 case RTA_SPORT:
4941 case RTA_DPORT:
4942 case RTA_IP_PROTO:
4943 break;
4944 default:
4945 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
4946 return -EINVAL;
4947 }
4948 }
4949
4950 return 0;
4951}
4952
/* RTM_GETROUTE handler: build a flowi6 from the request attributes,
 * perform an input or output route lookup, and unicast the result back
 * to the requester.
 *
 * With RTM_F_FIB_MATCH the FIB entry that matched is reported instead
 * of the (possibly cloned) dst the lookup produced.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct fib6_info *from;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6 = {};
	bool fibmatch;

	err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	/* Without an explicit RTA_UID, forwarded lookups (iif set) use
	 * INVALID_UID and local output lookups use the caller's uid.
	 */
	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (tb[RTA_SPORT])
		fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &fl6.flowi6_proto, AF_INET6,
						  extack);
		if (err)
			goto errout;
	}

	if (iif) {
		/* Input-path lookup, as if the packet arrived on iif */
		struct net_device *dev;
		int flags = 0;

		/* RCU protects the device pointer during the lookup */
		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);

		rcu_read_unlock();
	} else {
		/* Output-path lookup */
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}


	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* The skb now owns the dst reference; kfree_skb() drops it */
	skb_dst_set(skb, &rt->dst);

	/* rt->from (the originating FIB entry) is RCU-protected */
	rcu_read_lock();
	from = rcu_dereference(rt->from);
	if (from) {
		if (fibmatch)
			err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
					    iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
		else
			err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
					    &fl6.saddr, iif, RTM_NEWROUTE,
					    NETLINK_CB(in_skb).portid,
					    nlh->nlmsg_seq, 0);
	} else {
		err = -ENETUNREACH;
	}
	rcu_read_unlock();

	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
5096
David Ahern8d1c8022018-04-17 17:33:26 -07005097void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
Roopa Prabhu37a1d362015-09-13 10:18:33 -07005098 unsigned int nlm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005099{
5100 struct sk_buff *skb;
Daniel Lezcano55786892008-03-04 13:47:47 -08005101 struct net *net = info->nl_net;
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08005102 u32 seq;
5103 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005104
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08005105 err = -ENOBUFS;
David S. Miller38308472011-12-03 18:02:47 -05005106 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
Thomas Graf86872cb2006-08-22 00:01:08 -07005107
Roopa Prabhu19e42e42015-07-21 10:43:48 +02005108 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
David S. Miller38308472011-12-03 18:02:47 -05005109 if (!skb)
Thomas Graf21713eb2006-08-15 00:35:24 -07005110 goto errout;
5111
David Ahernd4ead6b2018-04-17 17:33:16 -07005112 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5113 event, info->portid, seq, nlm_flags);
Patrick McHardy26932562007-01-31 23:16:40 -08005114 if (err < 0) {
5115 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5116 WARN_ON(err == -EMSGSIZE);
5117 kfree_skb(skb);
5118 goto errout;
5119 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00005120 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08005121 info->nlh, gfp_any());
5122 return;
Thomas Graf21713eb2006-08-15 00:35:24 -07005123errout:
5124 if (err < 0)
Daniel Lezcano55786892008-03-04 13:47:47 -08005125 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005126}
5127
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005128static int ip6_route_dev_notify(struct notifier_block *this,
Jiri Pirko351638e2013-05-28 01:30:21 +00005129 unsigned long event, void *ptr)
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005130{
Jiri Pirko351638e2013-05-28 01:30:21 +00005131 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09005132 struct net *net = dev_net(dev);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005133
WANG Cong242d3a42017-05-08 10:12:13 -07005134 if (!(dev->flags & IFF_LOOPBACK))
5135 return NOTIFY_OK;
5136
5137 if (event == NETDEV_REGISTER) {
David Ahernad1601a2019-03-27 20:53:56 -07005138 net->ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = dev;
Changli Gaod8d1f302010-06-10 23:31:35 -07005139 net->ipv6.ip6_null_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005140 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
5141#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Changli Gaod8d1f302010-06-10 23:31:35 -07005142 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005143 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
Changli Gaod8d1f302010-06-10 23:31:35 -07005144 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005145 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
5146#endif
WANG Cong76da0702017-06-20 11:42:27 -07005147 } else if (event == NETDEV_UNREGISTER &&
5148 dev->reg_state != NETREG_UNREGISTERED) {
5149 /* NETDEV_UNREGISTER could be fired for multiple times by
5150 * netdev_wait_allrefs(). Make sure we only call this once.
5151 */
Eric Dumazet12d94a82017-08-15 04:09:51 -07005152 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07005153#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Eric Dumazet12d94a82017-08-15 04:09:51 -07005154 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
5155 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07005156#endif
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005157 }
5158
5159 return NOTIFY_OK;
5160}
5161
Linus Torvalds1da177e2005-04-16 15:20:36 -07005162/*
5163 * /proc
5164 */
5165
5166#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07005167static int rt6_stats_seq_show(struct seq_file *seq, void *v)
5168{
Daniel Lezcano69ddb802008-03-04 13:46:23 -08005169 struct net *net = (struct net *)seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005170 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
Daniel Lezcano69ddb802008-03-04 13:46:23 -08005171 net->ipv6.rt6_stats->fib_nodes,
5172 net->ipv6.rt6_stats->fib_route_nodes,
Wei Wang81eb8442017-10-06 12:06:11 -07005173 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
Daniel Lezcano69ddb802008-03-04 13:46:23 -08005174 net->ipv6.rt6_stats->fib_rt_entries,
5175 net->ipv6.rt6_stats->fib_rt_cache,
Eric Dumazetfc66f952010-10-08 06:37:34 +00005176 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
Daniel Lezcano69ddb802008-03-04 13:46:23 -08005177 net->ipv6.rt6_stats->fib_discarded_routes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005178
5179 return 0;
5180}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005181#endif /* CONFIG_PROC_FS */
5182
5183#ifdef CONFIG_SYSCTL
5184
/* sysctl handler for net.ipv6.route.flush (write-only).
 *
 * Writing a value triggers a fib6 garbage-collection run.
 * NOTE(review): @delay is sampled *before* proc_dointvec() stores the
 * newly written value, so this run uses the previous flush_delay --
 * confirm that is intentional.
 */
static
int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;
	if (!write)
		return -EINVAL;

	/* extra1 carries the owning netns (set in ipv6_route_sysctl_init) */
	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	/* delay <= 0: run GC immediately; otherwise pass delay jiffies */
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
5204
/* Bounds for the proc_dointvec_minmax entry below */
static int zero;
static int one = 1;

/* Template for the per-netns net.ipv6.route.* sysctl table.  The .data
 * pointers here reference init_net; ipv6_route_sysctl_init() duplicates
 * the template and re-points each entry at the namespace's own fields.
 */
static struct ctl_table ipv6_route_table_template[] = {
	{
		/* write-only trigger, handled above */
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		/* millisecond view of the same gc_min_interval value */
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{
		/* boolean, clamped to [0, 1] via extra1/extra2 */
		.procname	=	"skip_notify_on_dev_down",
		.data		=	&init_net.ipv6.sysctl.skip_notify_on_dev_down,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_minmax,
		.extra1		=	&zero,
		.extra2		=	&one,
	},
	{ }
};
5290
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00005291struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
Daniel Lezcano760f2d02008-01-10 02:53:43 -08005292{
5293 struct ctl_table *table;
5294
5295 table = kmemdup(ipv6_route_table_template,
5296 sizeof(ipv6_route_table_template),
5297 GFP_KERNEL);
YOSHIFUJI Hideaki5ee09102008-02-28 00:24:28 +09005298
5299 if (table) {
5300 table[0].data = &net->ipv6.sysctl.flush_delay;
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00005301 table[0].extra1 = net;
Alexey Dobriyan86393e52009-08-29 01:34:49 +00005302 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
YOSHIFUJI Hideaki5ee09102008-02-28 00:24:28 +09005303 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
5304 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5305 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
5306 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
5307 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
5308 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
5309 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
Alexey Dobriyan9c69fab2009-12-18 20:11:03 -08005310 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
David Ahern7c6bb7d2018-10-11 20:17:21 -07005311 table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;
Eric W. Biederman464dc802012-11-16 03:02:59 +00005312
5313 /* Don't export sysctls to unprivileged users */
5314 if (net->user_ns != &init_user_ns)
5315 table[0].procname = NULL;
YOSHIFUJI Hideaki5ee09102008-02-28 00:24:28 +09005316 }
5317
Daniel Lezcano760f2d02008-01-10 02:53:43 -08005318 return table;
5319}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005320#endif
5321
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00005322static int __net_init ip6_route_net_init(struct net *net)
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005323{
Pavel Emelyanov633d424b2008-04-21 14:25:23 -07005324 int ret = -ENOMEM;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005325
Alexey Dobriyan86393e52009-08-29 01:34:49 +00005326 memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
5327 sizeof(net->ipv6.ip6_dst_ops));
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005328
Eric Dumazetfc66f952010-10-08 06:37:34 +00005329 if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
5330 goto out_ip6_dst_ops;
5331
David Ahern421842e2018-04-17 17:33:18 -07005332 net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
5333 sizeof(*net->ipv6.fib6_null_entry),
5334 GFP_KERNEL);
5335 if (!net->ipv6.fib6_null_entry)
5336 goto out_ip6_dst_entries;
5337
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005338 net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
5339 sizeof(*net->ipv6.ip6_null_entry),
5340 GFP_KERNEL);
5341 if (!net->ipv6.ip6_null_entry)
David Ahern421842e2018-04-17 17:33:18 -07005342 goto out_fib6_null_entry;
Changli Gaod8d1f302010-06-10 23:31:35 -07005343 net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
David S. Miller62fa8a82011-01-26 20:51:05 -08005344 dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
5345 ip6_template_metrics, true);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005346
5347#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Vincent Bernatfeca7d82017-08-08 20:23:49 +02005348 net->ipv6.fib6_has_custom_rules = false;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005349 net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
5350 sizeof(*net->ipv6.ip6_prohibit_entry),
5351 GFP_KERNEL);
Peter Zijlstra68fffc62008-10-07 14:12:10 -07005352 if (!net->ipv6.ip6_prohibit_entry)
5353 goto out_ip6_null_entry;
Changli Gaod8d1f302010-06-10 23:31:35 -07005354 net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
David S. Miller62fa8a82011-01-26 20:51:05 -08005355 dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
5356 ip6_template_metrics, true);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005357
5358 net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
5359 sizeof(*net->ipv6.ip6_blk_hole_entry),
5360 GFP_KERNEL);
Peter Zijlstra68fffc62008-10-07 14:12:10 -07005361 if (!net->ipv6.ip6_blk_hole_entry)
5362 goto out_ip6_prohibit_entry;
Changli Gaod8d1f302010-06-10 23:31:35 -07005363 net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
David S. Miller62fa8a82011-01-26 20:51:05 -08005364 dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
5365 ip6_template_metrics, true);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005366#endif
5367
Peter Zijlstrab339a47c2008-10-07 14:15:00 -07005368 net->ipv6.sysctl.flush_delay = 0;
5369 net->ipv6.sysctl.ip6_rt_max_size = 4096;
5370 net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
5371 net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
5372 net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
5373 net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
5374 net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
5375 net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
David Ahern7c6bb7d2018-10-11 20:17:21 -07005376 net->ipv6.sysctl.skip_notify_on_dev_down = 0;
Peter Zijlstrab339a47c2008-10-07 14:15:00 -07005377
Benjamin Thery6891a342008-03-04 13:49:47 -08005378 net->ipv6.ip6_rt_gc_expire = 30*HZ;
5379
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005380 ret = 0;
5381out:
5382 return ret;
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005383
Peter Zijlstra68fffc62008-10-07 14:12:10 -07005384#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5385out_ip6_prohibit_entry:
5386 kfree(net->ipv6.ip6_prohibit_entry);
5387out_ip6_null_entry:
5388 kfree(net->ipv6.ip6_null_entry);
5389#endif
David Ahern421842e2018-04-17 17:33:18 -07005390out_fib6_null_entry:
5391 kfree(net->ipv6.fib6_null_entry);
Eric Dumazetfc66f952010-10-08 06:37:34 +00005392out_ip6_dst_entries:
5393 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005394out_ip6_dst_ops:
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005395 goto out;
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005396}
5397
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00005398static void __net_exit ip6_route_net_exit(struct net *net)
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005399{
David Ahern421842e2018-04-17 17:33:18 -07005400 kfree(net->ipv6.fib6_null_entry);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005401 kfree(net->ipv6.ip6_null_entry);
5402#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5403 kfree(net->ipv6.ip6_prohibit_entry);
5404 kfree(net->ipv6.ip6_blk_hole_entry);
5405#endif
Xiaotian Feng41bb78b2010-11-02 16:11:05 +00005406 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005407}
5408
Thomas Grafd1896342012-06-18 12:08:33 +00005409static int __net_init ip6_route_net_init_late(struct net *net)
5410{
5411#ifdef CONFIG_PROC_FS
Christoph Hellwigc3506372018-04-10 19:42:55 +02005412 proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
5413 sizeof(struct ipv6_route_iter));
Christoph Hellwig3617d942018-04-13 20:38:35 +02005414 proc_create_net_single("rt6_stats", 0444, net->proc_net,
5415 rt6_stats_seq_show, NULL);
Thomas Grafd1896342012-06-18 12:08:33 +00005416#endif
5417 return 0;
5418}
5419
5420static void __net_exit ip6_route_net_exit_late(struct net *net)
5421{
5422#ifdef CONFIG_PROC_FS
Gao fengece31ff2013-02-18 01:34:56 +00005423 remove_proc_entry("ipv6_route", net->proc_net);
5424 remove_proc_entry("rt6_stats", net->proc_net);
Thomas Grafd1896342012-06-18 12:08:33 +00005425#endif
5426}
5427
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005428static struct pernet_operations ip6_route_net_ops = {
5429 .init = ip6_route_net_init,
5430 .exit = ip6_route_net_exit,
5431};
5432
David S. Millerc3426b42012-06-09 16:27:05 -07005433static int __net_init ipv6_inetpeer_init(struct net *net)
5434{
5435 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
5436
5437 if (!bp)
5438 return -ENOMEM;
5439 inet_peer_base_init(bp);
5440 net->ipv6.peers = bp;
5441 return 0;
5442}
5443
5444static void __net_exit ipv6_inetpeer_exit(struct net *net)
5445{
5446 struct inet_peer_base *bp = net->ipv6.peers;
5447
5448 net->ipv6.peers = NULL;
David S. Miller56a6b242012-06-09 16:32:41 -07005449 inetpeer_invalidate_tree(bp);
David S. Millerc3426b42012-06-09 16:27:05 -07005450 kfree(bp);
5451}
5452
David S. Miller2b823f72012-06-09 19:00:16 -07005453static struct pernet_operations ipv6_inetpeer_ops = {
David S. Millerc3426b42012-06-09 16:27:05 -07005454 .init = ipv6_inetpeer_init,
5455 .exit = ipv6_inetpeer_exit,
5456};
5457
Thomas Grafd1896342012-06-18 12:08:33 +00005458static struct pernet_operations ip6_route_net_late_ops = {
5459 .init = ip6_route_net_init_late,
5460 .exit = ip6_route_net_exit_late,
5461};
5462
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005463static struct notifier_block ip6_route_dev_notifier = {
5464 .notifier_call = ip6_route_dev_notify,
WANG Cong242d3a42017-05-08 10:12:13 -07005465 .priority = ADDRCONF_NOTIFY_PRIORITY - 10,
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005466};
5467
WANG Cong2f460932017-05-03 22:07:31 -07005468void __init ip6_route_init_special_entries(void)
5469{
5470 /* Registering of the loopback is done before this portion of code,
5471 * the loopback reference in rt6_info will not be taken, do it
5472 * manually for init_net */
David Ahernad1601a2019-03-27 20:53:56 -07005473 init_net.ipv6.fib6_null_entry->fib6_nh.fib_nh_dev = init_net.loopback_dev;
WANG Cong2f460932017-05-03 22:07:31 -07005474 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
5475 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5476 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
5477 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
5478 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5479 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
5480 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
5481 #endif
5482}
5483
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005484int __init ip6_route_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005485{
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005486 int ret;
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -07005487 int cpu;
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005488
Daniel Lezcano9a7ec3a2008-03-04 13:48:53 -08005489 ret = -ENOMEM;
5490 ip6_dst_ops_template.kmem_cachep =
5491 kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
5492 SLAB_HWCACHE_ALIGN, NULL);
5493 if (!ip6_dst_ops_template.kmem_cachep)
Fernando Carrijoc19a28e2009-01-07 18:09:08 -08005494 goto out;
David S. Miller14e50e52007-05-24 18:17:54 -07005495
Eric Dumazetfc66f952010-10-08 06:37:34 +00005496 ret = dst_entries_init(&ip6_dst_blackhole_ops);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005497 if (ret)
Daniel Lezcanobdb32892008-03-04 13:48:10 -08005498 goto out_kmem_cache;
Daniel Lezcanobdb32892008-03-04 13:48:10 -08005499
David S. Millerc3426b42012-06-09 16:27:05 -07005500 ret = register_pernet_subsys(&ipv6_inetpeer_ops);
5501 if (ret)
David S. Millere8803b62012-06-16 01:12:19 -07005502 goto out_dst_entries;
Thomas Graf2a0c4512012-06-14 23:00:17 +00005503
David S. Miller7e52b332012-06-15 15:51:55 -07005504 ret = register_pernet_subsys(&ip6_route_net_ops);
5505 if (ret)
5506 goto out_register_inetpeer;
David S. Millerc3426b42012-06-09 16:27:05 -07005507
Arnaud Ebalard5dc121e2008-10-01 02:37:56 -07005508 ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
5509
David S. Millere8803b62012-06-16 01:12:19 -07005510 ret = fib6_init();
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005511 if (ret)
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005512 goto out_register_subsys;
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005513
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005514 ret = xfrm6_init();
5515 if (ret)
David S. Millere8803b62012-06-16 01:12:19 -07005516 goto out_fib6_init;
Daniel Lezcanoc35b7e72007-12-08 00:14:11 -08005517
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005518 ret = fib6_rules_init();
5519 if (ret)
5520 goto xfrm6_init;
Daniel Lezcano7e5449c2007-12-08 00:14:54 -08005521
Thomas Grafd1896342012-06-18 12:08:33 +00005522 ret = register_pernet_subsys(&ip6_route_net_late_ops);
5523 if (ret)
5524 goto fib6_rules_init;
5525
Florian Westphal16feebc2017-12-02 21:44:08 +01005526 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
5527 inet6_rtm_newroute, NULL, 0);
5528 if (ret < 0)
5529 goto out_register_late_subsys;
5530
5531 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
5532 inet6_rtm_delroute, NULL, 0);
5533 if (ret < 0)
5534 goto out_register_late_subsys;
5535
5536 ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
5537 inet6_rtm_getroute, NULL,
5538 RTNL_FLAG_DOIT_UNLOCKED);
5539 if (ret < 0)
Thomas Grafd1896342012-06-18 12:08:33 +00005540 goto out_register_late_subsys;
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005541
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005542 ret = register_netdevice_notifier(&ip6_route_dev_notifier);
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005543 if (ret)
Thomas Grafd1896342012-06-18 12:08:33 +00005544 goto out_register_late_subsys;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005545
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -07005546 for_each_possible_cpu(cpu) {
5547 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
5548
5549 INIT_LIST_HEAD(&ul->head);
5550 spin_lock_init(&ul->lock);
5551 }
5552
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005553out:
5554 return ret;
5555
Thomas Grafd1896342012-06-18 12:08:33 +00005556out_register_late_subsys:
Florian Westphal16feebc2017-12-02 21:44:08 +01005557 rtnl_unregister_all(PF_INET6);
Thomas Grafd1896342012-06-18 12:08:33 +00005558 unregister_pernet_subsys(&ip6_route_net_late_ops);
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005559fib6_rules_init:
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005560 fib6_rules_cleanup();
5561xfrm6_init:
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005562 xfrm6_fini();
Thomas Graf2a0c4512012-06-14 23:00:17 +00005563out_fib6_init:
5564 fib6_gc_cleanup();
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005565out_register_subsys:
5566 unregister_pernet_subsys(&ip6_route_net_ops);
David S. Miller7e52b332012-06-15 15:51:55 -07005567out_register_inetpeer:
5568 unregister_pernet_subsys(&ipv6_inetpeer_ops);
Eric Dumazetfc66f952010-10-08 06:37:34 +00005569out_dst_entries:
5570 dst_entries_destroy(&ip6_dst_blackhole_ops);
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005571out_kmem_cache:
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005572 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
Daniel Lezcano433d49c2007-12-07 00:43:48 -08005573 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005574}
5575
5576void ip6_route_cleanup(void)
5577{
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005578 unregister_netdevice_notifier(&ip6_route_dev_notifier);
Thomas Grafd1896342012-06-18 12:08:33 +00005579 unregister_pernet_subsys(&ip6_route_net_late_ops);
Thomas Graf101367c2006-08-04 03:39:02 -07005580 fib6_rules_cleanup();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005581 xfrm6_fini();
Linus Torvalds1da177e2005-04-16 15:20:36 -07005582 fib6_gc_cleanup();
David S. Millerc3426b42012-06-09 16:27:05 -07005583 unregister_pernet_subsys(&ipv6_inetpeer_ops);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005584 unregister_pernet_subsys(&ip6_route_net_ops);
Xiaotian Feng41bb78b2010-11-02 16:11:05 +00005585 dst_entries_destroy(&ip6_dst_blackhole_ops);
Benjamin Theryf2fc6a52008-03-04 13:49:23 -08005586 kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587}