blob: 6ad2fa51a23a1597098918789ae24dbc2613a95d [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/*
3 * Linux INET6 implementation
4 * FIB front-end.
5 *
6 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09007 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 */
9
10/* Changes:
11 *
12 * YOSHIFUJI Hideaki @USAGI
13 * reworked default router selection.
14 * - respect outgoing interface
15 * - select from (probably) reachable routers (i.e.
16 * routers in REACHABLE, STALE, DELAY or PROBE states).
17 * - always select the same router if it is (probably)
18 * reachable. otherwise, round-robin the list.
YOSHIFUJI Hideakic0bece92006-08-23 17:23:25 -070019 * Ville Nuorvala
20 * Fixed routing subtrees.
Linus Torvalds1da177e2005-04-16 15:20:36 -070021 */
22
Joe Perchesf3213832012-05-15 14:11:53 +000023#define pr_fmt(fmt) "IPv6: " fmt
24
Randy Dunlap4fc268d2006-01-11 12:17:47 -080025#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070026#include <linux/errno.h>
Paul Gortmakerbc3b2d72011-07-15 11:47:34 -040027#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070028#include <linux/types.h>
29#include <linux/times.h>
30#include <linux/socket.h>
31#include <linux/sockios.h>
32#include <linux/net.h>
33#include <linux/route.h>
34#include <linux/netdevice.h>
35#include <linux/in6.h>
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +090036#include <linux/mroute6.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070038#include <linux/if_arp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#include <linux/proc_fs.h>
40#include <linux/seq_file.h>
Daniel Lezcano5b7c9312008-03-03 23:28:58 -080041#include <linux/nsproxy.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090042#include <linux/slab.h>
Wei Wang35732d02017-10-06 12:05:57 -070043#include <linux/jhash.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020044#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070045#include <net/snmp.h>
46#include <net/ipv6.h>
47#include <net/ip6_fib.h>
48#include <net/ip6_route.h>
49#include <net/ndisc.h>
50#include <net/addrconf.h>
51#include <net/tcp.h>
52#include <linux/rtnetlink.h>
53#include <net/dst.h>
Jiri Benc904af042015-08-20 13:56:31 +020054#include <net/dst_metadata.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070055#include <net/xfrm.h>
Tom Tucker8d717402006-07-30 20:43:36 -070056#include <net/netevent.h>
Thomas Graf21713eb2006-08-15 00:35:24 -070057#include <net/netlink.h>
David Ahern3c618c12019-04-20 09:28:20 -070058#include <net/rtnh.h>
Roopa Prabhu19e42e42015-07-21 10:43:48 +020059#include <net/lwtunnel.h>
Jiri Benc904af042015-08-20 13:56:31 +020060#include <net/ip_tunnels.h>
David Ahernca254492015-10-12 11:47:10 -070061#include <net/l3mdev.h>
Roopa Prabhueacb9382018-05-22 14:03:28 -070062#include <net/ip.h>
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080063#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
65#ifdef CONFIG_SYSCTL
66#include <linux/sysctl.h>
67#endif
68
David Ahern30d444d2018-05-23 17:08:48 -070069static int ip6_rt_type_to_error(u8 fib6_type);
70
71#define CREATE_TRACE_POINTS
72#include <trace/events/fib6.h>
73EXPORT_TRACEPOINT_SYMBOL_GPL(fib6_table_lookup);
74#undef CREATE_TRACE_POINTS
75
/* Outcome of neighbour (NUD) reachability evaluation for a route's
 * nexthop.  Negative values are distinct failure modes consumed by the
 * route-selection code; RT6_NUD_SUCCEED means the neighbour looked usable.
 */
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,		/* hard failure: do not use this nexthop */
	RT6_NUD_FAIL_PROBE = -2,	/* failed, a probe was/should be issued */
	RT6_NUD_FAIL_DO_RR = -1,	/* failed, fall back to round-robin selection */
	RT6_NUD_SUCCEED = 1
};
82
Linus Torvalds1da177e2005-04-16 15:20:36 -070083static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
David S. Miller0dbaee32010-12-13 12:52:14 -080084static unsigned int ip6_default_advmss(const struct dst_entry *dst);
Steffen Klassertebb762f2011-11-23 02:12:51 +000085static unsigned int ip6_mtu(const struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct dst_entry *ip6_negative_advice(struct dst_entry *);
87static void ip6_dst_destroy(struct dst_entry *);
88static void ip6_dst_ifdown(struct dst_entry *,
89 struct net_device *dev, int how);
Daniel Lezcano569d3642008-01-18 03:56:57 -080090static int ip6_dst_gc(struct dst_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070091
92static int ip6_pkt_discard(struct sk_buff *skb);
Eric W. Biedermanede20592015-10-07 16:48:47 -050093static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
Kamala R7150aed2013-12-02 19:55:21 +053094static int ip6_pkt_prohibit(struct sk_buff *skb);
Eric W. Biedermanede20592015-10-07 16:48:47 -050095static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070096static void ip6_link_failure(struct sk_buff *skb);
David S. Miller6700c272012-07-17 03:29:28 -070097static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
Hangbin Liubd085ef2019-12-22 10:51:09 +080098 struct sk_buff *skb, u32 mtu,
99 bool confirm_neigh);
David S. Miller6700c272012-07-17 03:29:28 -0700100static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
101 struct sk_buff *skb);
David Ahern702cea52019-04-09 14:41:13 -0700102static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
103 int strict);
David Aherna1b7a1f2019-06-08 14:53:26 -0700104static size_t rt6_nlmsg_size(struct fib6_info *f6i);
David Ahernd4ead6b2018-04-17 17:33:16 -0700105static int rt6_fill_node(struct net *net, struct sk_buff *skb,
David Ahern8d1c8022018-04-17 17:33:26 -0700106 struct fib6_info *rt, struct dst_entry *dst,
David Ahernd4ead6b2018-04-17 17:33:16 -0700107 struct in6_addr *dest, struct in6_addr *src,
David Ahern16a16cd2017-02-02 12:37:11 -0800108 int iif, int type, u32 portid, u32 seq,
109 unsigned int flags);
David Ahern7e4b5122019-04-16 14:36:00 -0700110static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
Wei Wang510e2ce2019-05-16 13:30:54 -0700111 const struct in6_addr *daddr,
112 const struct in6_addr *saddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800114#ifdef CONFIG_IPV6_ROUTE_INFO
David Ahern8d1c8022018-04-17 17:33:26 -0700115static struct fib6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000116 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -0700117 const struct in6_addr *gwaddr,
118 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +0000119 unsigned int pref);
David Ahern8d1c8022018-04-17 17:33:26 -0700120static struct fib6_info *rt6_get_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000121 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -0700122 const struct in6_addr *gwaddr,
123 struct net_device *dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800124#endif
125
/* Per-CPU list of rt6_info entries that live outside the FIB tree
 * (e.g. cached/uncached dsts).  Kept so they can be walked and fixed up
 * when a network device goes away; see rt6_uncached_list_flush_dev().
 */
struct uncached_list {
	spinlock_t lock;		/* protects @head */
	struct list_head head;		/* list of rt6_info.rt6i_uncached */
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
132
/* Link @rt onto this CPU's uncached list so it can be found later by
 * rt6_uncached_list_flush_dev().  Remembers which per-CPU list was used
 * in rt->rt6i_uncached_list, since deletion may run on another CPU.
 */
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}
143
/* Unlink @rt from the per-CPU uncached list it was added to, if any,
 * and drop the per-netns uncached-route statistic.  Safe to call on an
 * rt that was never added (list_empty() check).
 */
void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}
156
/* Detach @dev from every uncached rt6_info in @net: the idev reference is
 * moved to the loopback device and the dst's device is replaced by the
 * global blackhole_netdev, so the routes stay valid after @dev is gone.
 * No-op for the loopback device itself, which never goes away before the
 * netns does.
 */
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			/* re-home the inet6_dev reference to loopback */
			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			/* swap the dst's device for blackhole_netdev,
			 * transferring the device refcount accordingly
			 */
			if (rt_dev == dev) {
				rt->dst.dev = blackhole_netdev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}
188
David Ahernf8a1b432018-04-17 17:33:21 -0700189static inline const void *choose_neigh_daddr(const struct in6_addr *p,
David S. Millerf894cbf2012-07-02 21:52:24 -0700190 struct sk_buff *skb,
191 const void *daddr)
David S. Miller39232972012-01-26 15:22:32 -0500192{
David S. Millera7563f32012-01-26 16:29:16 -0500193 if (!ipv6_addr_any(p))
David S. Miller39232972012-01-26 15:22:32 -0500194 return (const void *) p;
David S. Millerf894cbf2012-07-02 21:52:24 -0700195 else if (skb)
196 return &ipv6_hdr(skb)->daddr;
David S. Miller39232972012-01-26 15:22:32 -0500197 return daddr;
198}
199
David Ahernf8a1b432018-04-17 17:33:21 -0700200struct neighbour *ip6_neigh_lookup(const struct in6_addr *gw,
201 struct net_device *dev,
202 struct sk_buff *skb,
203 const void *daddr)
David S. Millerd3aaeb32011-07-18 00:40:17 -0700204{
David S. Miller39232972012-01-26 15:22:32 -0500205 struct neighbour *n;
206
David Ahernf8a1b432018-04-17 17:33:21 -0700207 daddr = choose_neigh_daddr(gw, skb, daddr);
208 n = __ipv6_neigh_lookup(dev, daddr);
David S. Millerf83c7792011-12-28 15:41:23 -0500209 if (n)
210 return n;
Stefano Brivio7adf3242019-01-02 13:29:27 +0100211
212 n = neigh_create(&nd_tbl, daddr, dev);
213 return IS_ERR(n) ? NULL : n;
David Ahernf8a1b432018-04-17 17:33:21 -0700214}
215
/* dst_ops->neigh_lookup hook: derive the nexthop from the rt6_info that
 * embeds @dst and delegate to ip6_neigh_lookup().
 */
static struct neighbour *ip6_dst_neigh_lookup(const struct dst_entry *dst,
					      struct sk_buff *skb,
					      const void *daddr)
{
	const struct rt6_info *rt = container_of(dst, struct rt6_info, dst);

	return ip6_neigh_lookup(rt6_nexthop(rt, &in6addr_any),
				dst->dev, skb, daddr);
}
225
/* dst_ops->confirm_neigh hook: mark the route's neighbour as recently
 * confirmed.  Skips devices that do not do neighbour discovery
 * (IFF_NOARP/IFF_LOOPBACK) and multicast destinations, which have no
 * neighbour entry to confirm.
 */
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt6_nexthop(rt, &in6addr_any), NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}
240
/* Template for the per-netns IPv6 dst_ops; copied at namespace init.
 * Wires the generic dst layer to the IPv6 routing implementations above.
 */
static struct dst_ops ip6_dst_ops_template = {
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
	.default_advmss		=	ip6_default_advmss,
	.mtu			=	ip6_mtu,
	.cow_metrics		=	dst_cow_metrics_generic,
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
	.redirect		=	rt6_do_redirect,
	.local_out		=	__ip6_local_out,
	.neigh_lookup		=	ip6_dst_neigh_lookup,
	.confirm_neigh		=	ip6_confirm_neigh,
};
259
Steffen Klassertebb762f2011-11-23 02:12:51 +0000260static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
Roland Dreierec831ea2011-01-31 13:16:00 -0800261{
Steffen Klassert618f9bc2011-11-23 02:13:31 +0000262 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
263
264 return mtu ? : dst->dev->mtu;
Roland Dreierec831ea2011-01-31 13:16:00 -0800265}
266
/* Blackhole routes intentionally ignore PMTU updates: nothing to adjust
 * on a dst that discards everything.
 */
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu,
					 bool confirm_neigh)
{
}
272
/* Blackhole routes intentionally ignore redirects as well. */
static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}
277
/* dst_ops used for blackhole copies of routes (e.g. via dst_blackhole
 * machinery): PMTU and redirect events are no-ops, MTU comes from the
 * cached metric.
 */
static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.destroy		= ip6_dst_destroy,
	.check			= ip6_dst_check,
	.mtu			= ip6_blackhole_mtu,
	.default_advmss		= ip6_default_advmss,
	.update_pmtu		= ip6_rt_blackhole_update_pmtu,
	.redirect		= ip6_rt_blackhole_redirect,
	.cow_metrics		= dst_cow_metrics_generic,
	.neigh_lookup		= ip6_dst_neigh_lookup,
};
289
/* Default metrics attached to the template routes below; hop limit 0
 * means "use the namespace default".
 */
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};
293
/* Template for the per-netns fib6_null_entry: a kernel-owned, reject-all
 * FIB entry with the worst possible metric, returned when no real route
 * matches.
 */
static const struct fib6_info fib6_null_entry_template = {
	.fib6_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.fib6_protocol  = RTPROT_KERNEL,
	.fib6_metric	= ~(u32)0,
	.fib6_ref	= REFCOUNT_INIT(1),
	.fib6_type	= RTN_UNREACHABLE,
	.fib6_metrics	= (struct dst_metrics *)&dst_default_metrics,
};
302
/* Template for the per-netns ip6_null_entry dst: drops all traffic with
 * -ENETUNREACH.  The initial __refcnt keeps it from ever being freed.
 */
static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};
314
#ifdef CONFIG_IPV6_MULTIPLE_TABLES

/* With policy routing enabled, two more terminal dst templates exist:
 * "prohibit" rejects traffic with -EACCES, "blackhole" silently discards
 * it with -EINVAL.  Both are permanently referenced like ip6_null_entry.
 */
static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
};

#endif
342
/* Initialize the rt6_info-specific tail of a freshly dst_alloc()'ed
 * entry: zero everything that follows the embedded dst_entry (which
 * dst_alloc already set up) and make the uncached list node self-linked
 * so rt6_uncached_list_del() is safe even if it was never added.
 */
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}
350
/* allocate dst with ip6_dst_ops
 *
 * Allocates and initializes a new rt6_info for @net bound to @dev with
 * an initial reference, bumping the per-netns allocation statistic.
 * Returns NULL on allocation failure.
 */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700366
/* dst_ops->destroy hook: release everything a dying rt6_info holds —
 * its metrics, uncached-list membership, inet6_dev reference, and the
 * fib6_info it was created from.
 */
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct fib6_info *from;
	struct inet6_dev *idev;

	ip_dst_metrics_put(dst);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	/* atomically detach rt->from so concurrent readers see either the
	 * old fib6_info or NULL, then drop our reference to it
	 */
	from = xchg((__force struct fib6_info **)&rt->from, NULL);
	fib6_info_release(from);
}
385
/* dst_ops->ifdown hook: when @dev goes down, re-home the route's
 * inet6_dev reference to the namespace's loopback device so the dst
 * does not pin a dead device's idev.
 */
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}
402
Martin KaFai Lau5973fb12015-11-11 11:51:07 -0800403static bool __rt6_check_expired(const struct rt6_info *rt)
404{
405 if (rt->rt6i_flags & RTF_EXPIRES)
406 return time_after(jiffies, rt->dst.expires);
407 else
408 return false;
409}
410
/* Full expiry check: the rt's own RTF_EXPIRES timer, or — when the rt was
 * cloned from a fib6_info — staleness of the dst or expiry of that parent
 * entry.  Caller must hold rcu_read_lock() for the rt->from dereference.
 */
static bool rt6_check_expired(const struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);

	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			fib6_check_expired(from);
	}
	return false;
}
426
/* Multipath route selection: given the initial match in @res, pick one
 * path among its ECMP siblings using the flow hash and per-nexthop upper
 * bounds, and fill res->f6i/res->nh with the chosen entry.  Skipped when
 * the match has no siblings or the lookup already matched on oif.
 */
void fib6_select_path(const struct net *net, struct fib6_result *res,
		      struct flowi6 *fl6, int oif, bool have_oif_match,
		      const struct sk_buff *skb, int strict)
{
	struct fib6_info *sibling, *next_sibling;
	struct fib6_info *match = res->f6i;

	if ((!match->fib6_nsiblings && !match->nh) || have_oif_match)
		goto out;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash &&
	    (!match->nh || nexthop_is_multipath(match->nh)))
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	/* nexthop objects carry their own path-selection logic */
	if (unlikely(match->nh)) {
		nexthop_path_fib6_result(res, fl6->mp_hash);
		return;
	}

	/* first entry covers hashes up to its upper bound */
	if (fl6->mp_hash <= atomic_read(&match->fib6_nh->fib_nh_upper_bound))
		goto out;

	/* otherwise walk the siblings for the band containing the hash;
	 * a sibling with a bad route score ends the search at the default
	 */
	list_for_each_entry_safe(sibling, next_sibling, &match->fib6_siblings,
				 fib6_siblings) {
		const struct fib6_nh *nh = sibling->fib6_nh;
		int nh_upper_bound;

		nh_upper_bound = atomic_read(&nh->fib_nh_upper_bound);
		if (fl6->mp_hash > nh_upper_bound)
			continue;
		if (rt6_score_route(nh, sibling->fib6_flags, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

out:
	res->f6i = match;
	res->nh = match->fib6_nh;
}
470
Linus Torvalds1da177e2005-04-16 15:20:36 -0700471/*
Wei Wang66f5d6c2017-10-06 12:06:10 -0700472 * Route lookup. rcu_read_lock() should be held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700473 */
474
David Ahern0c59d002019-04-09 14:41:18 -0700475static bool __rt6_device_match(struct net *net, const struct fib6_nh *nh,
476 const struct in6_addr *saddr, int oif, int flags)
477{
478 const struct net_device *dev;
479
480 if (nh->fib_nh_flags & RTNH_F_DEAD)
481 return false;
482
483 dev = nh->fib_nh_dev;
484 if (oif) {
485 if (dev->ifindex == oif)
486 return true;
487 } else {
488 if (ipv6_chk_addr(net, saddr, dev,
489 flags & RT6_LOOKUP_F_IFACE))
490 return true;
491 }
492
493 return false;
494}
495
/* Argument bundle for the __rt6_nh_dev_match() callback: the lookup
 * parameters in, and the matching fib6_nh out via @nh.
 */
struct fib6_nh_dm_arg {
	struct net		*net;
	const struct in6_addr	*saddr;
	int			oif;
	int			flags;
	struct fib6_nh		*nh;	/* result: last nexthop visited */
};
503
/* nexthop_for_each_fib6_nh() callback: record the current nexthop and
 * return non-zero (stopping the walk) when it matches the device
 * constraints in @_arg.
 */
static int __rt6_nh_dev_match(struct fib6_nh *nh, void *_arg)
{
	struct fib6_nh_dm_arg *arg = _arg;

	arg->nh = nh;
	return __rt6_device_match(arg->net, nh, arg->saddr, arg->oif,
				  arg->flags);
}
512
513/* returns fib6_nh from nexthop or NULL */
514static struct fib6_nh *rt6_nh_dev_match(struct net *net, struct nexthop *nh,
515 struct fib6_result *res,
516 const struct in6_addr *saddr,
517 int oif, int flags)
518{
519 struct fib6_nh_dm_arg arg = {
520 .net = net,
521 .saddr = saddr,
522 .oif = oif,
523 .flags = flags,
524 };
525
526 if (nexthop_is_blackhole(nh))
527 return NULL;
528
529 if (nexthop_for_each_fib6_nh(nh, __rt6_nh_dev_match, &arg))
530 return arg.nh;
531
532 return NULL;
533}
534
/* Refine the lookup result in @res by device: starting from res->f6i,
 * find the first FIB entry (walking fib6_next) whose nexthop satisfies
 * the oif/saddr constraints, falling back to fib6_null_entry when oif
 * matching is mandatory (RT6_LOOKUP_F_IFACE) or the nexthop is dead.
 * Fills res->nh/fib6_type/fib6_flags; blackhole nexthop objects are
 * reported as RTN_BLACKHOLE with RTF_REJECT.  Caller holds rcu_read_lock.
 */
static void rt6_device_match(struct net *net, struct fib6_result *res,
			     const struct in6_addr *saddr, int oif, int flags)
{
	struct fib6_info *f6i = res->f6i;
	struct fib6_info *spf6i;
	struct fib6_nh *nh;

	/* no constraints at all: the initial match stands unless its
	 * nexthop is dead or a blackhole
	 */
	if (!oif && ipv6_addr_any(saddr)) {
		if (unlikely(f6i->nh)) {
			nh = nexthop_fib6_nh(f6i->nh);
			if (nexthop_is_blackhole(f6i->nh))
				goto out_blackhole;
		} else {
			nh = f6i->fib6_nh;
		}
		if (!(nh->fib_nh_flags & RTNH_F_DEAD))
			goto out;
	}

	/* scan entries sharing this prefix for one whose nexthop matches */
	for (spf6i = f6i; spf6i; spf6i = rcu_dereference(spf6i->fib6_next)) {
		bool matched = false;

		if (unlikely(spf6i->nh)) {
			nh = rt6_nh_dev_match(net, spf6i->nh, res, saddr,
					      oif, flags);
			if (nh)
				matched = true;
		} else {
			nh = spf6i->fib6_nh;
			if (__rt6_device_match(net, nh, saddr, oif, flags))
				matched = true;
		}
		if (matched) {
			res->f6i = spf6i;
			goto out;
		}
	}

	/* strict oif matching requested but nothing matched */
	if (oif && flags & RT6_LOOKUP_F_IFACE) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
		goto out;
	}

	/* fall back to the original match */
	if (unlikely(f6i->nh)) {
		nh = nexthop_fib6_nh(f6i->nh);
		if (nexthop_is_blackhole(f6i->nh))
			goto out_blackhole;
	} else {
		nh = f6i->fib6_nh;
	}

	if (nh->fib_nh_flags & RTNH_F_DEAD) {
		res->f6i = net->ipv6.fib6_null_entry;
		nh = res->f6i->fib6_nh;
	}
out:
	res->nh = nh;
	res->fib6_type = res->f6i->fib6_type;
	res->fib6_flags = res->f6i->fib6_flags;
	return;

out_blackhole:
	res->fib6_flags |= RTF_REJECT;
	res->fib6_type = RTN_BLACKHOLE;
	res->nh = nh;
}
602
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800603#ifdef CONFIG_IPV6_ROUTER_PREF
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200604struct __rt6_probe_work {
605 struct work_struct work;
606 struct in6_addr target;
607 struct net_device *dev;
608};
609
610static void rt6_probe_deferred(struct work_struct *w)
611{
612 struct in6_addr mcaddr;
613 struct __rt6_probe_work *work =
614 container_of(w, struct __rt6_probe_work, work);
615
616 addrconf_addr_solict_mult(&work->target, &mcaddr);
Erik Nordmarkadc176c2016-12-02 14:00:08 -0800617 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200618 dev_put(work->dev);
Michael Büsch662f5532015-02-08 10:14:07 +0100619 kfree(work);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200620}
621
David Aherncc3a86c2019-04-09 14:41:12 -0700622static void rt6_probe(struct fib6_nh *fib6_nh)
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800623{
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200624 struct __rt6_probe_work *work = NULL;
David Ahern5e670d82018-04-17 17:33:14 -0700625 const struct in6_addr *nh_gw;
Eric Dumazet1bef4c22019-11-07 09:26:19 -0800626 unsigned long last_probe;
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000627 struct neighbour *neigh;
David Ahern5e670d82018-04-17 17:33:14 -0700628 struct net_device *dev;
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200629 struct inet6_dev *idev;
David Ahern5e670d82018-04-17 17:33:14 -0700630
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800631 /*
632 * Okay, this does not seem to be appropriate
633 * for now, however, we need to check if it
634 * is really so; aka Router Reachability Probing.
635 *
636 * Router Reachability Probe MUST be rate-limited
637 * to no more than one per minute.
638 */
Hangbin Liu004b3942019-11-20 15:39:06 +0800639 if (!fib6_nh->fib_nh_gw_family)
Amerigo Wangfdd66812012-09-10 02:48:44 +0000640 return;
David Ahern5e670d82018-04-17 17:33:14 -0700641
David Aherncc3a86c2019-04-09 14:41:12 -0700642 nh_gw = &fib6_nh->fib_nh_gw6;
643 dev = fib6_nh->fib_nh_dev;
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000644 rcu_read_lock_bh();
Eric Dumazet1bef4c22019-11-07 09:26:19 -0800645 last_probe = READ_ONCE(fib6_nh->last_probe);
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200646 idev = __in6_dev_get(dev);
David Ahern5e670d82018-04-17 17:33:14 -0700647 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000648 if (neigh) {
Martin KaFai Lau8d6c31b2015-07-24 09:57:43 -0700649 if (neigh->nud_state & NUD_VALID)
650 goto out;
651
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000652 write_lock(&neigh->lock);
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700653 if (!(neigh->nud_state & NUD_VALID) &&
654 time_after(jiffies,
David Aherndcd1f572018-04-18 15:39:05 -0700655 neigh->updated + idev->cnf.rtr_probe_interval)) {
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700656 work = kmalloc(sizeof(*work), GFP_ATOMIC);
657 if (work)
658 __neigh_set_probe_once(neigh);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200659 }
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000660 write_unlock(&neigh->lock);
Eric Dumazet1bef4c22019-11-07 09:26:19 -0800661 } else if (time_after(jiffies, last_probe +
Sabrina Dubrocaf547fac2018-10-12 16:22:47 +0200662 idev->cnf.rtr_probe_interval)) {
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700663 work = kmalloc(sizeof(*work), GFP_ATOMIC);
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000664 }
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700665
Eric Dumazet1bef4c22019-11-07 09:26:19 -0800666 if (!work || cmpxchg(&fib6_nh->last_probe,
667 last_probe, jiffies) != last_probe) {
668 kfree(work);
669 } else {
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700670 INIT_WORK(&work->work, rt6_probe_deferred);
David Ahern5e670d82018-04-17 17:33:14 -0700671 work->target = *nh_gw;
672 dev_hold(dev);
673 work->dev = dev;
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700674 schedule_work(&work->work);
675 }
676
Martin KaFai Lau8d6c31b2015-07-24 09:57:43 -0700677out:
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000678 rcu_read_unlock_bh();
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800679}
680#else
David Aherncc3a86c2019-04-09 14:41:12 -0700681static inline void rt6_probe(struct fib6_nh *fib6_nh)
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800682{
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800683}
684#endif
685
Linus Torvalds1da177e2005-04-16 15:20:36 -0700686/*
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800687 * Default Router Selection (RFC 2461 6.3.6)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 */
David Ahern1ba9a892019-04-09 14:41:10 -0700689static enum rt6_nud_state rt6_check_neigh(const struct fib6_nh *fib6_nh)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690{
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200691 enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
David Ahern5e670d82018-04-17 17:33:14 -0700692 struct neighbour *neigh;
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000693
YOSHIFUJI Hideaki / 吉藤英明145a3622013-01-17 12:53:38 +0000694 rcu_read_lock_bh();
David Ahern1ba9a892019-04-09 14:41:10 -0700695 neigh = __ipv6_neigh_lookup_noref(fib6_nh->fib_nh_dev,
696 &fib6_nh->fib_nh_gw6);
YOSHIFUJI Hideaki / 吉藤英明145a3622013-01-17 12:53:38 +0000697 if (neigh) {
698 read_lock(&neigh->lock);
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800699 if (neigh->nud_state & NUD_VALID)
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200700 ret = RT6_NUD_SUCCEED;
YOSHIFUJI Hideaki398bcbe2008-01-19 00:35:16 -0800701#ifdef CONFIG_IPV6_ROUTER_PREF
Paul Marksa5a81f02012-12-03 10:26:54 +0000702 else if (!(neigh->nud_state & NUD_FAILED))
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200703 ret = RT6_NUD_SUCCEED;
Jiri Benc7e980562013-12-11 13:48:20 +0100704 else
705 ret = RT6_NUD_FAIL_PROBE;
YOSHIFUJI Hideaki398bcbe2008-01-19 00:35:16 -0800706#endif
YOSHIFUJI Hideaki / 吉藤英明145a3622013-01-17 12:53:38 +0000707 read_unlock(&neigh->lock);
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200708 } else {
709 ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
Jiri Benc7e980562013-12-11 13:48:20 +0100710 RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
Paul Marksa5a81f02012-12-03 10:26:54 +0000711 }
YOSHIFUJI Hideaki / 吉藤英明145a3622013-01-17 12:53:38 +0000712 rcu_read_unlock_bh();
713
Paul Marksa5a81f02012-12-03 10:26:54 +0000714 return ret;
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800715}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716
David Ahern702cea52019-04-09 14:41:13 -0700717static int rt6_score_route(const struct fib6_nh *nh, u32 fib6_flags, int oif,
718 int strict)
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800719{
David Ahern6e1809a2019-04-09 14:41:11 -0700720 int m = 0;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900721
David Ahern6e1809a2019-04-09 14:41:11 -0700722 if (!oif || nh->fib_nh_dev->ifindex == oif)
723 m = 2;
724
YOSHIFUJI Hideaki77d16f42006-08-23 17:25:05 -0700725 if (!m && (strict & RT6_LOOKUP_F_IFACE))
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200726 return RT6_NUD_FAIL_HARD;
YOSHIFUJI Hideakiebacaaa2006-03-20 17:04:53 -0800727#ifdef CONFIG_IPV6_ROUTER_PREF
David Ahern702cea52019-04-09 14:41:13 -0700728 m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(fib6_flags)) << 2;
YOSHIFUJI Hideakiebacaaa2006-03-20 17:04:53 -0800729#endif
David Ahern1ba9a892019-04-09 14:41:10 -0700730 if ((strict & RT6_LOOKUP_F_REACHABLE) &&
David Ahern702cea52019-04-09 14:41:13 -0700731 !(fib6_flags & RTF_NONEXTHOP) && nh->fib_nh_gw_family) {
David Ahern1ba9a892019-04-09 14:41:10 -0700732 int n = rt6_check_neigh(nh);
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200733 if (n < 0)
734 return n;
735 }
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800736 return m;
737}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700738
David Ahern28679ed2019-04-09 14:41:14 -0700739static bool find_match(struct fib6_nh *nh, u32 fib6_flags,
740 int oif, int strict, int *mpri, bool *do_rr)
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800741{
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200742 bool match_do_rr = false;
David Ahern28679ed2019-04-09 14:41:14 -0700743 bool rc = false;
744 int m;
Andy Gospodarek35103d12015-08-13 10:39:01 -0400745
David Ahern28679ed2019-04-09 14:41:14 -0700746 if (nh->fib_nh_flags & RTNH_F_DEAD)
Ido Schimmel8067bb82018-01-07 12:45:09 +0200747 goto out;
748
David Ahern28679ed2019-04-09 14:41:14 -0700749 if (ip6_ignore_linkdown(nh->fib_nh_dev) &&
750 nh->fib_nh_flags & RTNH_F_LINKDOWN &&
David Ahernd5d32e42016-10-24 12:27:23 -0700751 !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
Andy Gospodarek35103d12015-08-13 10:39:01 -0400752 goto out;
David S. Millerf11e6652007-03-24 20:36:25 -0700753
David Ahern28679ed2019-04-09 14:41:14 -0700754 m = rt6_score_route(nh, fib6_flags, oif, strict);
Jiri Benc7e980562013-12-11 13:48:20 +0100755 if (m == RT6_NUD_FAIL_DO_RR) {
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200756 match_do_rr = true;
757 m = 0; /* lowest valid score */
Jiri Benc7e980562013-12-11 13:48:20 +0100758 } else if (m == RT6_NUD_FAIL_HARD) {
David S. Millerf11e6652007-03-24 20:36:25 -0700759 goto out;
David S. Millerf11e6652007-03-24 20:36:25 -0700760 }
761
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200762 if (strict & RT6_LOOKUP_F_REACHABLE)
David Ahern28679ed2019-04-09 14:41:14 -0700763 rt6_probe(nh);
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200764
Jiri Benc7e980562013-12-11 13:48:20 +0100765 /* note that m can be RT6_NUD_FAIL_PROBE at this point */
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200766 if (m > *mpri) {
767 *do_rr = match_do_rr;
768 *mpri = m;
David Ahern28679ed2019-04-09 14:41:14 -0700769 rc = true;
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200770 }
David S. Millerf11e6652007-03-24 20:36:25 -0700771out:
David Ahern28679ed2019-04-09 14:41:14 -0700772 return rc;
David S. Millerf11e6652007-03-24 20:36:25 -0700773}
774
David Ahern17a59842019-06-08 14:53:25 -0700775struct fib6_nh_frl_arg {
776 u32 flags;
777 int oif;
778 int strict;
779 int *mpri;
780 bool *do_rr;
781 struct fib6_nh *nh;
782};
783
784static int rt6_nh_find_match(struct fib6_nh *nh, void *_arg)
785{
786 struct fib6_nh_frl_arg *arg = _arg;
787
788 arg->nh = nh;
789 return find_match(nh, arg->flags, arg->oif, arg->strict,
790 arg->mpri, arg->do_rr);
791}
792
David Ahernb7bc4b62019-04-16 14:36:08 -0700793static void __find_rr_leaf(struct fib6_info *f6i_start,
David Ahern30c15f02019-04-09 14:41:15 -0700794 struct fib6_info *nomatch, u32 metric,
David Ahernb7bc4b62019-04-16 14:36:08 -0700795 struct fib6_result *res, struct fib6_info **cont,
David Ahern30c15f02019-04-09 14:41:15 -0700796 int oif, int strict, bool *do_rr, int *mpri)
David S. Millerf11e6652007-03-24 20:36:25 -0700797{
David Ahernb7bc4b62019-04-16 14:36:08 -0700798 struct fib6_info *f6i;
David Ahern30c15f02019-04-09 14:41:15 -0700799
David Ahernb7bc4b62019-04-16 14:36:08 -0700800 for (f6i = f6i_start;
801 f6i && f6i != nomatch;
802 f6i = rcu_dereference(f6i->fib6_next)) {
David Ahern17a59842019-06-08 14:53:25 -0700803 bool matched = false;
David Ahern30c15f02019-04-09 14:41:15 -0700804 struct fib6_nh *nh;
805
David Ahernb7bc4b62019-04-16 14:36:08 -0700806 if (cont && f6i->fib6_metric != metric) {
807 *cont = f6i;
David Ahern30c15f02019-04-09 14:41:15 -0700808 return;
809 }
810
David Ahernb7bc4b62019-04-16 14:36:08 -0700811 if (fib6_check_expired(f6i))
David Ahern30c15f02019-04-09 14:41:15 -0700812 continue;
813
David Ahern17a59842019-06-08 14:53:25 -0700814 if (unlikely(f6i->nh)) {
815 struct fib6_nh_frl_arg arg = {
816 .flags = f6i->fib6_flags,
817 .oif = oif,
818 .strict = strict,
819 .mpri = mpri,
820 .do_rr = do_rr
821 };
822
823 if (nexthop_is_blackhole(f6i->nh)) {
824 res->fib6_flags = RTF_REJECT;
825 res->fib6_type = RTN_BLACKHOLE;
826 res->f6i = f6i;
827 res->nh = nexthop_fib6_nh(f6i->nh);
828 return;
829 }
830 if (nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_find_match,
831 &arg)) {
832 matched = true;
833 nh = arg.nh;
834 }
835 } else {
836 nh = f6i->fib6_nh;
837 if (find_match(nh, f6i->fib6_flags, oif, strict,
838 mpri, do_rr))
839 matched = true;
840 }
841 if (matched) {
David Ahernb7bc4b62019-04-16 14:36:08 -0700842 res->f6i = f6i;
843 res->nh = nh;
David Ahern7d21fec2019-04-16 14:36:11 -0700844 res->fib6_flags = f6i->fib6_flags;
845 res->fib6_type = f6i->fib6_type;
David Ahernb7bc4b62019-04-16 14:36:08 -0700846 }
David Ahern30c15f02019-04-09 14:41:15 -0700847 }
848}
849
David Ahernb7bc4b62019-04-16 14:36:08 -0700850static void find_rr_leaf(struct fib6_node *fn, struct fib6_info *leaf,
851 struct fib6_info *rr_head, int oif, int strict,
852 bool *do_rr, struct fib6_result *res)
David Ahern30c15f02019-04-09 14:41:15 -0700853{
David Ahernb7bc4b62019-04-16 14:36:08 -0700854 u32 metric = rr_head->fib6_metric;
855 struct fib6_info *cont = NULL;
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800856 int mpri = -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857
David Ahernb7bc4b62019-04-16 14:36:08 -0700858 __find_rr_leaf(rr_head, NULL, metric, res, &cont,
David Ahern30c15f02019-04-09 14:41:15 -0700859 oif, strict, do_rr, &mpri);
Steffen Klassert9fbdcfa2015-04-28 13:03:04 -0700860
David Ahernb7bc4b62019-04-16 14:36:08 -0700861 __find_rr_leaf(leaf, rr_head, metric, res, &cont,
David Ahern30c15f02019-04-09 14:41:15 -0700862 oif, strict, do_rr, &mpri);
Steffen Klassert9fbdcfa2015-04-28 13:03:04 -0700863
David Ahernb7bc4b62019-04-16 14:36:08 -0700864 if (res->f6i || !cont)
865 return;
Steffen Klassert9fbdcfa2015-04-28 13:03:04 -0700866
David Ahernb7bc4b62019-04-16 14:36:08 -0700867 __find_rr_leaf(cont, NULL, metric, res, NULL,
David Ahern30c15f02019-04-09 14:41:15 -0700868 oif, strict, do_rr, &mpri);
David S. Millerf11e6652007-03-24 20:36:25 -0700869}
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800870
David Ahernb7bc4b62019-04-16 14:36:08 -0700871static void rt6_select(struct net *net, struct fib6_node *fn, int oif,
872 struct fib6_result *res, int strict)
David S. Millerf11e6652007-03-24 20:36:25 -0700873{
David Ahern8d1c8022018-04-17 17:33:26 -0700874 struct fib6_info *leaf = rcu_dereference(fn->leaf);
David Ahernb7bc4b62019-04-16 14:36:08 -0700875 struct fib6_info *rt0;
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200876 bool do_rr = false;
Wei Wang17ecf592017-10-06 12:06:09 -0700877 int key_plen;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
David Ahernb7bc4b62019-04-16 14:36:08 -0700879 /* make sure this function or its helpers sets f6i */
880 res->f6i = NULL;
881
David Ahern421842e2018-04-17 17:33:18 -0700882 if (!leaf || leaf == net->ipv6.fib6_null_entry)
David Ahernb7bc4b62019-04-16 14:36:08 -0700883 goto out;
Wei Wang8d1040e2017-10-06 12:06:08 -0700884
Wei Wang66f5d6c2017-10-06 12:06:10 -0700885 rt0 = rcu_dereference(fn->rr_ptr);
David S. Millerf11e6652007-03-24 20:36:25 -0700886 if (!rt0)
Wei Wang66f5d6c2017-10-06 12:06:10 -0700887 rt0 = leaf;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888
Wei Wang17ecf592017-10-06 12:06:09 -0700889 /* Double check to make sure fn is not an intermediate node
890 * and fn->leaf does not points to its child's leaf
891 * (This might happen if all routes under fn are deleted from
892 * the tree and fib6_repair_tree() is called on the node.)
893 */
David Ahern93c2fb22018-04-18 15:38:59 -0700894 key_plen = rt0->fib6_dst.plen;
Wei Wang17ecf592017-10-06 12:06:09 -0700895#ifdef CONFIG_IPV6_SUBTREES
David Ahern93c2fb22018-04-18 15:38:59 -0700896 if (rt0->fib6_src.plen)
897 key_plen = rt0->fib6_src.plen;
Wei Wang17ecf592017-10-06 12:06:09 -0700898#endif
899 if (fn->fn_bit != key_plen)
David Ahernb7bc4b62019-04-16 14:36:08 -0700900 goto out;
Wei Wang17ecf592017-10-06 12:06:09 -0700901
David Ahernb7bc4b62019-04-16 14:36:08 -0700902 find_rr_leaf(fn, leaf, rt0, oif, strict, &do_rr, res);
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +0200903 if (do_rr) {
David Ahern8fb11a92018-05-04 13:54:24 -0700904 struct fib6_info *next = rcu_dereference(rt0->fib6_next);
David S. Millerf11e6652007-03-24 20:36:25 -0700905
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800906 /* no entries matched; do round-robin */
David Ahern93c2fb22018-04-18 15:38:59 -0700907 if (!next || next->fib6_metric != rt0->fib6_metric)
Wei Wang8d1040e2017-10-06 12:06:08 -0700908 next = leaf;
David S. Millerf11e6652007-03-24 20:36:25 -0700909
Wei Wang66f5d6c2017-10-06 12:06:10 -0700910 if (next != rt0) {
David Ahern93c2fb22018-04-18 15:38:59 -0700911 spin_lock_bh(&leaf->fib6_table->tb6_lock);
Wei Wang66f5d6c2017-10-06 12:06:10 -0700912 /* make sure next is not being deleted from the tree */
David Ahern93c2fb22018-04-18 15:38:59 -0700913 if (next->fib6_node)
Wei Wang66f5d6c2017-10-06 12:06:10 -0700914 rcu_assign_pointer(fn->rr_ptr, next);
David Ahern93c2fb22018-04-18 15:38:59 -0700915 spin_unlock_bh(&leaf->fib6_table->tb6_lock);
Wei Wang66f5d6c2017-10-06 12:06:10 -0700916 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 }
918
David Ahernb7bc4b62019-04-16 14:36:08 -0700919out:
920 if (!res->f6i) {
921 res->f6i = net->ipv6.fib6_null_entry;
David Ahern1cf844c2019-05-22 20:27:59 -0700922 res->nh = res->f6i->fib6_nh;
David Ahern7d21fec2019-04-16 14:36:11 -0700923 res->fib6_flags = res->f6i->fib6_flags;
924 res->fib6_type = res->f6i->fib6_type;
David Ahernb7bc4b62019-04-16 14:36:08 -0700925 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926}
927
David Ahern85bd05d2019-04-16 14:36:01 -0700928static bool rt6_is_gw_or_nonexthop(const struct fib6_result *res)
Martin KaFai Lau8b9df262015-05-22 20:55:59 -0700929{
David Ahern85bd05d2019-04-16 14:36:01 -0700930 return (res->f6i->fib6_flags & RTF_NONEXTHOP) ||
931 res->nh->fib_nh_gw_family;
Martin KaFai Lau8b9df262015-05-22 20:55:59 -0700932}
933
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800934#ifdef CONFIG_IPV6_ROUTE_INFO
/* Process a received Route Information option (struct route_info) from
 * gateway @gwaddr on @dev — presumably carried in a Router Advertisement
 * per RFC 4191; callers are not visible here.
 *
 * Validates the option, then adds, refreshes, or (on zero lifetime)
 * deletes the corresponding RTF_ROUTEINFO route. Returns 0 on success
 * or -EINVAL on a malformed option.
 */
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct fib6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		/* prefixes longer than 64 bits need at least 2 option units */
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	/* clamp the advertised lifetime (in seconds) to a sane jiffies value */
	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	/* a zero-length prefix denotes the default route via this router */
	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	/* zero lifetime withdraws an existing route */
	if (rt && !lifetime) {
		ip6_del_rt(net, rt, false);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		/* refresh the preference bits on the existing route */
		rt->fib6_flags = RTF_ROUTEINFO |
				 (rt->fib6_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		/* drop the reference taken by the get/add helpers above */
		fib6_info_release(rt);
	}
	return 0;
}
1008#endif
1009
David Ahernae90d862018-04-17 17:33:12 -07001010/*
1011 * Misc support functions
1012 */
1013
1014/* called with rcu_lock held */
David Ahern0d161582019-04-16 14:36:04 -07001015static struct net_device *ip6_rt_get_dev_rcu(const struct fib6_result *res)
David Ahernae90d862018-04-17 17:33:12 -07001016{
David Ahern0d161582019-04-16 14:36:04 -07001017 struct net_device *dev = res->nh->fib_nh_dev;
David Ahernae90d862018-04-17 17:33:12 -07001018
David Ahern7d21fec2019-04-16 14:36:11 -07001019 if (res->fib6_flags & (RTF_LOCAL | RTF_ANYCAST)) {
David Ahernae90d862018-04-17 17:33:12 -07001020 /* for copies of local routes, dst->dev needs to be the
1021 * device if it is a master device, the master device if
1022 * device is enslaved, and the loopback as the default
1023 */
1024 if (netif_is_l3_slave(dev) &&
David Ahern7d21fec2019-04-16 14:36:11 -07001025 !rt6_need_strict(&res->f6i->fib6_dst.addr))
David Ahernae90d862018-04-17 17:33:12 -07001026 dev = l3mdev_master_dev_rcu(dev);
1027 else if (!netif_is_l3_master(dev))
1028 dev = dev_net(dev)->loopback_dev;
1029 /* last case is netif_is_l3_master(dev) is true in which
1030 * case we want dev returned to be dev
1031 */
1032 }
1033
1034 return dev;
1035}
1036
David Ahern6edb3c92018-04-17 17:33:15 -07001037static const int fib6_prop[RTN_MAX + 1] = {
1038 [RTN_UNSPEC] = 0,
1039 [RTN_UNICAST] = 0,
1040 [RTN_LOCAL] = 0,
1041 [RTN_BROADCAST] = 0,
1042 [RTN_ANYCAST] = 0,
1043 [RTN_MULTICAST] = 0,
1044 [RTN_BLACKHOLE] = -EINVAL,
1045 [RTN_UNREACHABLE] = -EHOSTUNREACH,
1046 [RTN_PROHIBIT] = -EACCES,
1047 [RTN_THROW] = -EAGAIN,
1048 [RTN_NAT] = -EINVAL,
1049 [RTN_XRESOLVE] = -EINVAL,
1050};
1051
/* Translate a fib6 route type (RTN_*) into the dst error code it
 * implies; see the fib6_prop[] table above.
 */
static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}
1056
David Ahern8d1c8022018-04-17 17:33:26 -07001057static unsigned short fib6_info_dst_flags(struct fib6_info *rt)
David Ahern3b6761d2018-04-17 17:33:20 -07001058{
1059 unsigned short flags = 0;
1060
1061 if (rt->dst_nocount)
1062 flags |= DST_NOCOUNT;
1063 if (rt->dst_nopolicy)
1064 flags |= DST_NOPOLICY;
David Ahern3b6761d2018-04-17 17:33:20 -07001065
1066 return flags;
1067}
1068
David Ahern7d21fec2019-04-16 14:36:11 -07001069static void ip6_rt_init_dst_reject(struct rt6_info *rt, u8 fib6_type)
David Ahern6edb3c92018-04-17 17:33:15 -07001070{
David Ahern7d21fec2019-04-16 14:36:11 -07001071 rt->dst.error = ip6_rt_type_to_error(fib6_type);
David Ahern6edb3c92018-04-17 17:33:15 -07001072
David Ahern7d21fec2019-04-16 14:36:11 -07001073 switch (fib6_type) {
David Ahern6edb3c92018-04-17 17:33:15 -07001074 case RTN_BLACKHOLE:
1075 rt->dst.output = dst_discard_out;
1076 rt->dst.input = dst_discard;
1077 break;
1078 case RTN_PROHIBIT:
1079 rt->dst.output = ip6_pkt_prohibit_out;
1080 rt->dst.input = ip6_pkt_prohibit;
1081 break;
1082 case RTN_THROW:
1083 case RTN_UNREACHABLE:
1084 default:
1085 rt->dst.output = ip6_pkt_discard_out;
1086 rt->dst.input = ip6_pkt_discard;
1087 break;
1088 }
1089}
1090
David Ahern0d161582019-04-16 14:36:04 -07001091static void ip6_rt_init_dst(struct rt6_info *rt, const struct fib6_result *res)
David Ahern6edb3c92018-04-17 17:33:15 -07001092{
David Ahern7d21fec2019-04-16 14:36:11 -07001093 struct fib6_info *f6i = res->f6i;
David Ahern0d161582019-04-16 14:36:04 -07001094
David Ahern7d21fec2019-04-16 14:36:11 -07001095 if (res->fib6_flags & RTF_REJECT) {
1096 ip6_rt_init_dst_reject(rt, res->fib6_type);
David Ahern6edb3c92018-04-17 17:33:15 -07001097 return;
1098 }
1099
1100 rt->dst.error = 0;
1101 rt->dst.output = ip6_output;
1102
David Ahern7d21fec2019-04-16 14:36:11 -07001103 if (res->fib6_type == RTN_LOCAL || res->fib6_type == RTN_ANYCAST) {
David Ahern6edb3c92018-04-17 17:33:15 -07001104 rt->dst.input = ip6_input;
David Ahern7d21fec2019-04-16 14:36:11 -07001105 } else if (ipv6_addr_type(&f6i->fib6_dst.addr) & IPV6_ADDR_MULTICAST) {
David Ahern6edb3c92018-04-17 17:33:15 -07001106 rt->dst.input = ip6_mc_input;
1107 } else {
1108 rt->dst.input = ip6_forward;
1109 }
1110
David Ahern0d161582019-04-16 14:36:04 -07001111 if (res->nh->fib_nh_lws) {
1112 rt->dst.lwtstate = lwtstate_get(res->nh->fib_nh_lws);
David Ahern6edb3c92018-04-17 17:33:15 -07001113 lwtunnel_set_redirect(&rt->dst);
1114 }
1115
1116 rt->dst.lastuse = jiffies;
1117}
1118
Wei Wange873e4b2018-07-21 20:56:32 -07001119/* Caller must already hold reference to @from */
David Ahern8d1c8022018-04-17 17:33:26 -07001120static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
David Ahernae90d862018-04-17 17:33:12 -07001121{
David Ahernae90d862018-04-17 17:33:12 -07001122 rt->rt6i_flags &= ~RTF_EXPIRES;
David Aherna68886a2018-04-20 15:38:02 -07001123 rcu_assign_pointer(rt->from, from);
David Aherne1255ed2018-10-04 20:07:53 -07001124 ip_dst_init_metrics(&rt->dst, from->fib6_metrics);
David Ahernae90d862018-04-17 17:33:12 -07001125}
1126
David Ahern0d161582019-04-16 14:36:04 -07001127/* Caller must already hold reference to f6i in result */
1128static void ip6_rt_copy_init(struct rt6_info *rt, const struct fib6_result *res)
David Ahernae90d862018-04-17 17:33:12 -07001129{
David Ahern0d161582019-04-16 14:36:04 -07001130 const struct fib6_nh *nh = res->nh;
1131 const struct net_device *dev = nh->fib_nh_dev;
1132 struct fib6_info *f6i = res->f6i;
David Aherndcd1f572018-04-18 15:39:05 -07001133
David Ahern0d161582019-04-16 14:36:04 -07001134 ip6_rt_init_dst(rt, res);
David Ahern6edb3c92018-04-17 17:33:15 -07001135
David Ahern0d161582019-04-16 14:36:04 -07001136 rt->rt6i_dst = f6i->fib6_dst;
David Aherndcd1f572018-04-18 15:39:05 -07001137 rt->rt6i_idev = dev ? in6_dev_get(dev) : NULL;
David Ahern7d21fec2019-04-16 14:36:11 -07001138 rt->rt6i_flags = res->fib6_flags;
David Ahern0d161582019-04-16 14:36:04 -07001139 if (nh->fib_nh_gw_family) {
1140 rt->rt6i_gateway = nh->fib_nh_gw6;
David Ahern2b2450c2019-03-27 20:53:52 -07001141 rt->rt6i_flags |= RTF_GATEWAY;
1142 }
David Ahern0d161582019-04-16 14:36:04 -07001143 rt6_set_from(rt, f6i);
David Ahernae90d862018-04-17 17:33:12 -07001144#ifdef CONFIG_IPV6_SUBTREES
David Ahern0d161582019-04-16 14:36:04 -07001145 rt->rt6i_src = f6i->fib6_src;
David Ahernae90d862018-04-17 17:33:12 -07001146#endif
David Ahernae90d862018-04-17 17:33:12 -07001147}
1148
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001149static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
1150 struct in6_addr *saddr)
1151{
Wei Wang66f5d6c2017-10-06 12:06:10 -07001152 struct fib6_node *pn, *sn;
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001153 while (1) {
1154 if (fn->fn_flags & RTN_TL_ROOT)
1155 return NULL;
Wei Wang66f5d6c2017-10-06 12:06:10 -07001156 pn = rcu_dereference(fn->parent);
1157 sn = FIB6_SUBTREE(pn);
1158 if (sn && sn != fn)
David Ahern64547432018-05-09 20:34:19 -07001159 fn = fib6_node_lookup(sn, NULL, saddr);
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001160 else
1161 fn = pn;
1162 if (fn->fn_flags & RTN_RTINFO)
1163 return fn;
1164 }
1165}
Thomas Grafc71099a2006-08-04 23:20:06 -07001166
David Ahern10585b42019-03-20 09:24:50 -07001167static bool ip6_hold_safe(struct net *net, struct rt6_info **prt)
Wei Wangd3843fe2017-10-06 12:06:06 -07001168{
1169 struct rt6_info *rt = *prt;
1170
1171 if (dst_hold_safe(&rt->dst))
1172 return true;
David Ahern10585b42019-03-20 09:24:50 -07001173 if (net) {
Wei Wangd3843fe2017-10-06 12:06:06 -07001174 rt = net->ipv6.ip6_null_entry;
1175 dst_hold(&rt->dst);
1176 } else {
1177 rt = NULL;
1178 }
1179 *prt = rt;
1180 return false;
1181}
1182
David Aherndec9b0e2018-04-17 17:33:19 -07001183/* called with rcu_lock held */
David Ahern9b6b35a2019-04-16 14:36:02 -07001184static struct rt6_info *ip6_create_rt_rcu(const struct fib6_result *res)
David Aherndec9b0e2018-04-17 17:33:19 -07001185{
David Ahern9b6b35a2019-04-16 14:36:02 -07001186 struct net_device *dev = res->nh->fib_nh_dev;
1187 struct fib6_info *f6i = res->f6i;
1188 unsigned short flags;
David Aherndec9b0e2018-04-17 17:33:19 -07001189 struct rt6_info *nrt;
1190
David Ahern9b6b35a2019-04-16 14:36:02 -07001191 if (!fib6_info_hold_safe(f6i))
Xin Long1c87e792019-03-20 14:45:48 +08001192 goto fallback;
Wei Wange873e4b2018-07-21 20:56:32 -07001193
David Ahern9b6b35a2019-04-16 14:36:02 -07001194 flags = fib6_info_dst_flags(f6i);
David Ahern93531c62018-04-17 17:33:25 -07001195 nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
Xin Long1c87e792019-03-20 14:45:48 +08001196 if (!nrt) {
David Ahern9b6b35a2019-04-16 14:36:02 -07001197 fib6_info_release(f6i);
Xin Long1c87e792019-03-20 14:45:48 +08001198 goto fallback;
1199 }
David Aherndec9b0e2018-04-17 17:33:19 -07001200
David Ahern0d161582019-04-16 14:36:04 -07001201 ip6_rt_copy_init(nrt, res);
Xin Long1c87e792019-03-20 14:45:48 +08001202 return nrt;
1203
1204fallback:
1205 nrt = dev_net(dev)->ipv6.ip6_null_entry;
1206 dst_hold(&nrt->dst);
David Aherndec9b0e2018-04-17 17:33:19 -07001207 return nrt;
1208}
1209
Daniel Lezcano8ed67782008-03-04 13:48:30 -08001210static struct rt6_info *ip6_pol_route_lookup(struct net *net,
1211 struct fib6_table *table,
David Ahernb75cc8f2018-03-02 08:32:17 -08001212 struct flowi6 *fl6,
1213 const struct sk_buff *skb,
1214 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215{
David Ahernb1d40992019-04-16 14:35:59 -07001216 struct fib6_result res = {};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 struct fib6_node *fn;
David Ahern23fb93a2018-04-17 17:33:23 -07001218 struct rt6_info *rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001219
David Ahernb6cdbc82018-03-29 17:44:57 -07001220 if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
1221 flags &= ~RT6_LOOKUP_F_IFACE;
1222
Wei Wang66f5d6c2017-10-06 12:06:10 -07001223 rcu_read_lock();
David Ahern64547432018-05-09 20:34:19 -07001224 fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
Thomas Grafc71099a2006-08-04 23:20:06 -07001225restart:
David Ahernb1d40992019-04-16 14:35:59 -07001226 res.f6i = rcu_dereference(fn->leaf);
1227 if (!res.f6i)
1228 res.f6i = net->ipv6.fib6_null_entry;
David Ahernaf52a522019-04-09 14:41:16 -07001229 else
David Ahern75ef7382019-04-16 14:36:07 -07001230 rt6_device_match(net, &res, &fl6->saddr, fl6->flowi6_oif,
1231 flags);
David Ahernaf52a522019-04-09 14:41:16 -07001232
David Ahernb1d40992019-04-16 14:35:59 -07001233 if (res.f6i == net->ipv6.fib6_null_entry) {
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001234 fn = fib6_backtrack(fn, &fl6->saddr);
1235 if (fn)
1236 goto restart;
David Ahernaf52a522019-04-09 14:41:16 -07001237
1238 rt = net->ipv6.ip6_null_entry;
1239 dst_hold(&rt->dst);
1240 goto out;
David Ahernf88d8ea2019-06-03 20:19:52 -07001241 } else if (res.fib6_flags & RTF_REJECT) {
1242 goto do_create;
Martin KaFai Laua3c00e42014-10-20 13:42:43 -07001243 }
Wei Wang2b760fc2017-10-06 12:06:03 -07001244
David Ahernb1d40992019-04-16 14:35:59 -07001245 fib6_select_path(net, &res, fl6, fl6->flowi6_oif,
1246 fl6->flowi6_oif != 0, skb, flags);
1247
David S. Miller4c9483b2011-03-12 16:22:43 -05001248 /* Search through exception table */
David Ahern7e4b5122019-04-16 14:36:00 -07001249 rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
David Ahern23fb93a2018-04-17 17:33:23 -07001250 if (rt) {
David Ahern10585b42019-03-20 09:24:50 -07001251 if (ip6_hold_safe(net, &rt))
David Aherndec9b0e2018-04-17 17:33:19 -07001252 dst_use_noref(&rt->dst, jiffies);
David Ahern23fb93a2018-04-17 17:33:23 -07001253 } else {
David Ahernf88d8ea2019-06-03 20:19:52 -07001254do_create:
David Ahern9b6b35a2019-04-16 14:36:02 -07001255 rt = ip6_create_rt_rcu(&res);
David Aherndec9b0e2018-04-17 17:33:19 -07001256 }
Wei Wangd3843fe2017-10-06 12:06:06 -07001257
David Ahernaf52a522019-04-09 14:41:16 -07001258out:
David Ahern8ff2e5b2019-04-16 14:36:09 -07001259 trace_fib6_table_lookup(net, &res, table, fl6);
David Ahernaf52a522019-04-09 14:41:16 -07001260
Wei Wang66f5d6c2017-10-06 12:06:10 -07001261 rcu_read_unlock();
David Ahernb8115802015-11-19 12:24:22 -08001262
Thomas Grafc71099a2006-08-04 23:20:06 -07001263 return rt;
Thomas Grafc71099a2006-08-04 23:20:06 -07001264}
1265
Ian Morris67ba4152014-08-24 21:53:10 +01001266struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
David Ahernb75cc8f2018-03-02 08:32:17 -08001267 const struct sk_buff *skb, int flags)
Florian Westphalea6e5742011-09-05 16:05:44 +02001268{
David Ahernb75cc8f2018-03-02 08:32:17 -08001269 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
Florian Westphalea6e5742011-09-05 16:05:44 +02001270}
1271EXPORT_SYMBOL_GPL(ip6_route_lookup);
1272
YOSHIFUJI Hideaki9acd9f32008-04-10 15:42:10 +09001273struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
David Ahernb75cc8f2018-03-02 08:32:17 -08001274 const struct in6_addr *saddr, int oif,
1275 const struct sk_buff *skb, int strict)
Thomas Grafc71099a2006-08-04 23:20:06 -07001276{
David S. Miller4c9483b2011-03-12 16:22:43 -05001277 struct flowi6 fl6 = {
1278 .flowi6_oif = oif,
1279 .daddr = *daddr,
Thomas Grafc71099a2006-08-04 23:20:06 -07001280 };
1281 struct dst_entry *dst;
YOSHIFUJI Hideaki77d16f42006-08-23 17:25:05 -07001282 int flags = strict ? RT6_LOOKUP_F_IFACE : 0;
Thomas Grafc71099a2006-08-04 23:20:06 -07001283
Thomas Grafadaa70b2006-10-13 15:01:03 -07001284 if (saddr) {
David S. Miller4c9483b2011-03-12 16:22:43 -05001285 memcpy(&fl6.saddr, saddr, sizeof(*saddr));
Thomas Grafadaa70b2006-10-13 15:01:03 -07001286 flags |= RT6_LOOKUP_F_HAS_SADDR;
1287 }
1288
David Ahernb75cc8f2018-03-02 08:32:17 -08001289 dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
Thomas Grafc71099a2006-08-04 23:20:06 -07001290 if (dst->error == 0)
1291 return (struct rt6_info *) dst;
1292
1293 dst_release(dst);
1294
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 return NULL;
1296}
YOSHIFUJI Hideaki71590392007-02-22 22:05:40 +09001297EXPORT_SYMBOL(rt6_lookup);
1298
Thomas Grafc71099a2006-08-04 23:20:06 -07001299/* ip6_ins_rt is called with FREE table->tb6_lock.
Wei Wang1cfb71e2017-06-17 10:42:33 -07001300 * It takes new route entry, the addition fails by any reason the
1301 * route is released.
1302 * Caller must hold dst before calling it.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 */
1304
David Ahern8d1c8022018-04-17 17:33:26 -07001305static int __ip6_ins_rt(struct fib6_info *rt, struct nl_info *info,
David Ahern333c4302017-05-21 10:12:04 -06001306 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307{
1308 int err;
Thomas Grafc71099a2006-08-04 23:20:06 -07001309 struct fib6_table *table;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310
David Ahern93c2fb22018-04-18 15:38:59 -07001311 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07001312 spin_lock_bh(&table->tb6_lock);
David Ahernd4ead6b2018-04-17 17:33:16 -07001313 err = fib6_add(&table->tb6_root, rt, info, extack);
Wei Wang66f5d6c2017-10-06 12:06:10 -07001314 spin_unlock_bh(&table->tb6_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
1316 return err;
1317}
1318
David Ahern8d1c8022018-04-17 17:33:26 -07001319int ip6_ins_rt(struct net *net, struct fib6_info *rt)
Thomas Graf40e22e82006-08-22 00:00:45 -07001320{
David Ahernafb1d4b52018-04-17 17:33:11 -07001321 struct nl_info info = { .nl_net = net, };
Florian Westphale715b6d2015-01-05 23:57:44 +01001322
David Ahernd4ead6b2018-04-17 17:33:16 -07001323 return __ip6_ins_rt(rt, &info, NULL);
Thomas Graf40e22e82006-08-22 00:00:45 -07001324}
1325
/* Clone @res into a host-route (/128) RTF_CACHE entry for (daddr, saddr).
 * Takes a 'from' reference on res->f6i; returns NULL if the fib entry is
 * already being destroyed or the dst allocation fails.
 * NOTE(review): ip6_rt_get_dev_rcu() is called without taking the RCU lock
 * here (unlike ip6_rt_pcpu_alloc()) - presumably callers hold
 * rcu_read_lock(); confirm at the call sites.
 */
static struct rt6_info *ip6_rt_cache_alloc(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct fib6_info *f6i = res->f6i;
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	/* refuse to clone from an entry whose last reference is going away */
	if (!fib6_info_hold_safe(f6i))
		return NULL;

	dev = ip6_rt_get_dev_rcu(res);
	rt = ip6_dst_alloc(dev_net(dev), dev, 0);
	if (!rt) {
		fib6_info_release(f6i);	/* drop the reference taken above */
		return NULL;
	}

	ip6_rt_copy_init(rt, res);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;	/* exception entries are host routes */

	if (!rt6_is_gw_or_nonexthop(res)) {
		/* non-gateway route: daddr equal to a non-/128 route prefix
		 * address gets the anycast flag
		 */
		if (f6i->fib6_dst.plen != 128 &&
		    ipv6_addr_equal(&f6i->fib6_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367
/* Allocate a per-cpu dst for the route described by @res.
 * Takes a 'from' reference on res->f6i; returns NULL if the fib entry is
 * already being destroyed or the dst allocation fails.
 */
static struct rt6_info *ip6_rt_pcpu_alloc(const struct fib6_result *res)
{
	struct fib6_info *f6i = res->f6i;
	unsigned short flags = fib6_info_dst_flags(f6i);
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	/* refuse to copy from an entry whose last reference is going away */
	if (!fib6_info_hold_safe(f6i))
		return NULL;

	/* RCU protects the device looked up from the nexthop */
	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(res);
	pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
	rcu_read_unlock();
	if (!pcpu_rt) {
		fib6_info_release(f6i);	/* drop the reference taken above */
		return NULL;
	}
	ip6_rt_copy_init(pcpu_rt, res);
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}
1390
Wei Wang66f5d6c2017-10-06 12:06:10 -07001391/* It should be called with rcu_read_lock() acquired */
David Aherndb3fede2019-04-16 14:36:03 -07001392static struct rt6_info *rt6_get_pcpu_route(const struct fib6_result *res)
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001393{
Eric Dumazetc3530712019-05-31 18:11:25 -07001394 struct rt6_info *pcpu_rt;
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001395
Eric Dumazetc3530712019-05-31 18:11:25 -07001396 pcpu_rt = this_cpu_read(*res->nh->rt6i_pcpu);
Martin KaFai Laud52d3992015-05-22 20:56:06 -07001397
Martin KaFai Laua73e4192015-08-14 11:05:53 -07001398 return pcpu_rt;
1399}
1400
/* Allocate a pcpu copy of @res and publish it in this CPU's slot of
 * res->nh->rt6i_pcpu.  The slot must currently be empty: the cmpxchg()
 * from NULL is expected to succeed, hence the BUG_ON().
 * Returns the installed dst, or NULL on allocation failure.
 */
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    const struct fib6_result *res)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(res);
	if (!pcpu_rt)
		return NULL;

	p = this_cpu_ptr(res->nh->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	/* if the fib entry started dying while we were allocating, sever
	 * pcpu_rt->from right away so this cached dst does not delay the
	 * fib6_info's destruction
	 */
	if (res->f6i->fib6_destroying) {
		struct fib6_info *from;

		from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
		fib6_info_release(from);
	}

	return pcpu_rt;
}
1423
/* Exception (RTF_CACHE) hash table implementation.
 */
1426static DEFINE_SPINLOCK(rt6_exception_lock);
1427
/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct fib6_info *from;
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	net->ipv6.rt6_stats->fib_rt_cache--;

	/* purge completely the exception to allow releasing the held resources:
	 * some [sk] cache may keep the dst around for unlimited time
	 */
	from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
	fib6_info_release(from);
	dst_dev_put(&rt6_ex->rt6i->dst);

	hlist_del_rcu(&rt6_ex->hlist);
	dst_release(&rt6_ex->rt6i->dst);
	/* the rt6_ex container is freed only after a grace period: RCU
	 * walkers may still be traversing this chain
	 */
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
}
1456
1457/* Remove oldest rt6_ex in bucket and free the memory
1458 * Caller must hold rt6_exception_lock
1459 */
1460static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
1461{
1462 struct rt6_exception *rt6_ex, *oldest = NULL;
1463
1464 if (!bucket)
1465 return;
1466
1467 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1468 if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
1469 oldest = rt6_ex;
1470 }
1471 rt6_remove_exception(bucket, oldest);
1472}
1473
/* Bucket index for the (dst, src) pair in a fib6_nh exception table.
 * jhash over the destination address, seeded once with random bytes;
 * with CONFIG_IPV6_SUBTREES the source address is folded in as well.
 *
 * NOTE(review): jhash with a boot-time seed is weaker than siphash against
 * remotely triggered bucket collisions; upstream later converted this
 * function to siphash (requires <linux/siphash.h>) - consider backporting.
 */
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash2((const u32 *)dst, sizeof(*dst)/sizeof(u32), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash2((const u32 *)src, sizeof(*src)/sizeof(u32), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
1489
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair.
 * Caller must hold rt6_exception_lock.
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	/* advance the caller's pointer to the hashed slot; the caller may
	 * use *bucket afterwards (e.g. to insert or adjust depth)
	 */
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
1522
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair.
 * Caller must hold rcu_read_lock() (lockless variant of the above).
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	/* advance the caller's pointer to the hashed slot */
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
1557
/* Effective MTU for @res: the route's own metric (fib6_pmtu) when set,
 * otherwise the nexthop device's IPv6 MTU; capped at IP6_MAX_MTU and
 * reduced by any lightweight-tunnel encap headroom.
 */
static unsigned int fib6_mtu(const struct fib6_result *res)
{
	const struct fib6_nh *nh = res->nh;
	unsigned int mtu;

	if (res->f6i->fib6_pmtu) {
		mtu = res->f6i->fib6_pmtu;
	} else {
		struct net_device *dev = nh->fib_nh_dev;
		struct inet6_dev *idev;

		rcu_read_lock();
		idev = __in6_dev_get(dev);
		/* NOTE(review): idev is dereferenced without a NULL check -
		 * presumably a device backing a FIB entry always has inet6
		 * data attached; confirm against the call paths.
		 */
		mtu = idev->cnf.mtu6;
		rcu_read_unlock();
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
1579
/* The lowest bit of fib6_nh::rt6i_exception_bucket tags the bucket array
 * as "flushed" (nexthop going away; no new exceptions may be inserted).
 */
#define FIB6_EXCEPTION_BUCKET_FLUSHED 0x1UL

/* used when the flushed bit is not relevant, only access to the bucket
 * (ie., all bucket users except rt6_insert_exception);
 *
 * called under rcu lock; sometimes called with rt6_exception_lock held
 */
static
struct rt6_exception_bucket *fib6_nh_get_excptn_bucket(const struct fib6_nh *nh,
						       spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;

	/* with the lock held we may use the stronger dereference */
	if (lock)
		bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
						   lockdep_is_held(lock));
	else
		bucket = rcu_dereference(nh->rt6i_exception_bucket);

	/* remove bucket flushed bit if set */
	if (bucket) {
		unsigned long p = (unsigned long)bucket;

		p &= ~FIB6_EXCEPTION_BUCKET_FLUSHED;
		bucket = (struct rt6_exception_bucket *)p;
	}

	return bucket;
}
1609
1610static bool fib6_nh_excptn_bucket_flushed(struct rt6_exception_bucket *bucket)
1611{
1612 unsigned long p = (unsigned long)bucket;
1613
1614 return !!(p & FIB6_EXCEPTION_BUCKET_FLUSHED);
1615}
1616
/* called with rt6_exception_lock held */
static void fib6_nh_excptn_bucket_set_flushed(struct fib6_nh *nh,
					      spinlock_t *lock)
{
	struct rt6_exception_bucket *bucket;
	unsigned long p;

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(lock));

	/* set the low tag bit and republish, so rt6_insert_exception()
	 * sees the table as flushed and refuses to repopulate it
	 */
	p = (unsigned long)bucket;
	p |= FIB6_EXCEPTION_BUCKET_FLUSHED;
	bucket = (struct rt6_exception_bucket *)p;
	rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
}
1632
/* Insert cached route @nrt as an exception for the fib entry/nexthop in
 * @res.  Replaces an existing exception for the same (dst, src) key and
 * evicts the oldest entry when the bucket exceeds FIB6_MAX_DEPTH.
 * Returns 0 on success; -EINVAL when the nexthop's exception table has
 * been flushed (nexthop being deleted) or nrt's MTU is not below the
 * route's; -ENOMEM on allocation failure.  On success the fib node's
 * sernum is bumped so existing cached dsts get re-validated.
 */
static int rt6_insert_exception(struct rt6_info *nrt,
				const struct fib6_result *res)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct fib6_info *f6i = res->f6i;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct fib6_nh *nh = res->nh;
	int err = 0;

	spin_lock_bh(&rt6_exception_lock);

	bucket = rcu_dereference_protected(nh->rt6i_exception_bucket,
					   lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		/* first exception for this nexthop: allocate the table */
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(nh->rt6i_exception_bucket, bucket);
	} else if (fib6_nh_excptn_bucket_flushed(bucket)) {
		/* nexthop is being torn down; do not repopulate */
		err = -EINVAL;
		goto out;
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only fib6_dst.
	 */
	if (f6i->fib6_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif
	/* rt6_mtu_change() might lower mtu on f6i.
	 * Only insert this exception route if its mtu
	 * is less than f6i's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(res)) {
		err = -EINVAL;
		goto out;
	}

	/* a stale exception for the same key is replaced, not kept */
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&f6i->fib6_table->tb6_lock);
		fib6_update_sernum(net, f6i);
		spin_unlock_bh(&f6i->fib6_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}
1712
/* Remove exception entries hanging off @nh.
 * @from == NULL: flush everything and tag the bucket pointer as flushed so
 * rt6_insert_exception() cannot repopulate it (nexthop going away).
 * @from != NULL: remove only exceptions whose ->from is that fib entry.
 */
static void fib6_nh_flush_exceptions(struct fib6_nh *nh, struct fib6_info *from)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		goto out;

	/* Prevent rt6_insert_exception() to recreate the bucket list */
	if (!from)
		fib6_nh_excptn_bucket_set_flushed(nh, &rt6_exception_lock);

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist) {
			if (!from ||
			    rcu_access_pointer(rt6_ex->rt6i->from) == from)
				rt6_remove_exception(bucket, rt6_ex);
		}
		/* a full flush must leave every bucket empty */
		WARN_ON_ONCE(!from && bucket->depth);
		bucket++;
	}
out:
	spin_unlock_bh(&rt6_exception_lock);
}
1742
/* nexthop_for_each_fib6_nh() callback: flush @nh's exceptions for the
 * fib6_info passed as @arg.
 */
static int rt6_nh_flush_exceptions(struct fib6_nh *nh, void *arg)
{
	fib6_nh_flush_exceptions(nh, arg);

	return 0;	/* keep iterating over all nexthops */
}
1751
David Ahernc0b220c2019-05-22 20:27:57 -07001752void rt6_flush_exceptions(struct fib6_info *f6i)
1753{
David Aherne659ba32019-06-08 14:53:28 -07001754 if (f6i->nh)
1755 nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_flush_exceptions,
1756 f6i);
1757 else
1758 fib6_nh_flush_exceptions(f6i->fib6_nh, f6i);
David Ahernc0b220c2019-05-22 20:27:57 -07001759}
1760
/* Find the unexpired cached route for (daddr, saddr) in the exception
 * table of the fib6_result's nexthop.
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(const struct fib6_result *res,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct rt6_info *ret = NULL;

#ifdef CONFIG_IPV6_SUBTREES
	/* fib6_src.plen != 0 indicates f6i is in subtree
	 * and exception table is indexed by a hash of
	 * both fib6_dst and fib6_src.
	 * However, the src addr used to create the hash
	 * might not be exactly the passed in saddr which
	 * is a /128 addr from the flow.
	 * So we need to use f6i->fib6_src to redo lookup
	 * if the passed in saddr does not find anything.
	 * (See the logic in ip6_rt_cache_alloc() on how
	 * rt->rt6i_src is updated.)
	 */
	if (res->f6i->fib6_src.plen)
		src_key = saddr;
find_ex:
#endif
	bucket = fib6_nh_get_excptn_bucket(res->nh, NULL);
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	/* expired entries are skipped, not removed - GC handles them */
	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		ret = rt6_ex->rt6i;

#ifdef CONFIG_IPV6_SUBTREES
	/* Use fib6_src as src_key and redo lookup */
	if (!ret && src_key && src_key != &res->f6i->fib6_src.addr) {
		src_key = &res->f6i->fib6_src.addr;
		goto find_ex;
	}
#endif

	return ret;
}
1805
/* Remove the passed in cached rt from the hash table that contains it.
 * Returns 0 when an entry was found and removed, -ENOENT otherwise.
 */
static int fib6_nh_remove_exception(const struct fib6_nh *nh, int plen,
				    const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int err;

	/* cheap lockless precheck: no table, nothing to remove */
	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}
1844
/* Argument for rt6_nh_remove_exception_rt() when walking a nexthop group. */
struct fib6_nh_excptn_arg {
	struct rt6_info *rt;	/* cached route whose exception to remove */
	int plen;		/* fib6_src.plen of the origin fib entry */
};
1849
1850static int rt6_nh_remove_exception_rt(struct fib6_nh *nh, void *_arg)
1851{
1852 struct fib6_nh_excptn_arg *arg = _arg;
1853 int err;
1854
1855 err = fib6_nh_remove_exception(nh, arg->plen, arg->rt);
1856 if (err == 0)
1857 return 1;
1858
1859 return 0;
1860}
1861
/* Remove the exception entry backing cached route @rt from its origin fib
 * entry's nexthop(s).  Expects an RTF_CACHE rt with a live ->from.
 * Returns 0 on success, -EINVAL for a non-cache rt, -ENOENT if no
 * exception was found.
 * NOTE(review): rt->from is read with rcu_dereference() - callers are
 * presumably under rcu_read_lock(); confirm at the call sites.
 */
static int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct fib6_info *from;

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (from->nh) {
		/* nexthop-object route: probe each fib6_nh in the group */
		struct fib6_nh_excptn_arg arg = {
			.rt = rt,
			.plen = from->fib6_src.plen
		};
		int rc;

		/* rc = 1 means an entry was found */
		rc = nexthop_for_each_fib6_nh(from->nh,
					      rt6_nh_remove_exception_rt,
					      &arg);
		return rc ? 0 : -ENOENT;
	}

	return fib6_nh_remove_exception(from->fib6_nh,
					from->fib6_src.plen, rt);
}
1887
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp (LRU timestamp used by bucket eviction)
 */
static void fib6_nh_update_exception(const struct fib6_nh *nh, int plen,
				     const struct rt6_info *rt)
{
	const struct in6_addr *src_key = NULL;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;

	/* lockless lookup: caller is under rcu_read_lock() (see
	 * rt6_update_exception_stamp_rt())
	 */
	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, &rt->rt6i_dst.addr, src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;
}
Wei Wang35732d02017-10-06 12:05:57 -07001913
/* Argument/result for fib6_nh_find_match() when walking a nexthop group. */
struct fib6_nh_match_arg {
	const struct net_device *dev;	/* device the nexthop must use */
	const struct in6_addr *gw;	/* gateway to match (may be NULL) */
	struct fib6_nh *match;		/* out: first matching nexthop */
};
1919
1920/* determine if fib6_nh has given device and gateway */
1921static int fib6_nh_find_match(struct fib6_nh *nh, void *_arg)
1922{
1923 struct fib6_nh_match_arg *arg = _arg;
1924
1925 if (arg->dev != nh->fib_nh_dev ||
1926 (arg->gw && !nh->fib_nh_gw_family) ||
1927 (!arg->gw && nh->fib_nh_gw_family) ||
1928 (arg->gw && !ipv6_addr_equal(arg->gw, &nh->fib_nh_gw6)))
1929 return 0;
1930
1931 arg->match = nh;
1932
1933 /* found a match, break the loop */
1934 return 1;
1935}
1936
/* Refresh the LRU stamp of the exception entry backing cached route @rt.
 * For nexthop-object routes, first resolve which fib6_nh of rt->from owns
 * the exception by matching device and gateway.
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct fib6_info *from;
	struct fib6_nh *fib6_nh;

	rcu_read_lock();

	from = rcu_dereference(rt->from);
	if (!from || !(rt->rt6i_flags & RTF_CACHE))
		goto unlock;

	if (from->nh) {
		struct fib6_nh_match_arg arg = {
			.dev = rt->dst.dev,
			.gw = &rt->rt6i_gateway,
		};

		nexthop_for_each_fib6_nh(from->nh, fib6_nh_find_match, &arg);

		/* nexthop group may have changed; no owner, nothing to do */
		if (!arg.match)
			goto unlock;
		fib6_nh = arg.match;
	} else {
		fib6_nh = from->fib6_nh;
	}
	fib6_nh_update_exception(fib6_nh, from->fib6_src.plen, rt);
unlock:
	rcu_read_unlock();
}
1966
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001967static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1968 struct rt6_info *rt, int mtu)
1969{
1970 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1971 * lowest MTU in the path: always allow updating the route PMTU to
1972 * reflect PMTU decreases.
1973 *
1974 * If the new MTU is higher, and the route PMTU is equal to the local
1975 * MTU, this means the old MTU is the lowest in the path, so allow
1976 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1977 * handle this.
1978 */
1979
1980 if (dst_mtu(&rt->dst) >= mtu)
1981 return true;
1982
1983 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1984 return true;
1985
1986 return false;
1987}
1988
/* Propagate a device MTU change to every exception entry of @nh that is
 * allowed to take it (see rt6_mtu_change_route_allowed()).
 * Caller must hold rt6_exception_lock - the bucket is fetched with the
 * lock-held accessor.
 */
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       const struct fib6_nh *nh, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}
2015
/* both flags set = a cached entry that was learned via a gateway */
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

/* Remove every gateway exception of @nh whose gateway equals @gateway
 * (used when a router becomes a plain host, RFC 4861-style changes).
 */
static void fib6_nh_exceptions_clean_tohost(const struct fib6_nh *nh,
					    const struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	/* cheap lockless precheck: no exception table at all */
	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}
2050
/* GC helper: decide whether one exception entry should be removed now.
 * Non-RTF_EXPIRES entries age out after gc_args->timeout of disuse;
 * RTF_EXPIRES entries (e.g. pmtu-generated) go when their expiry passes;
 * gateway entries whose neighbour is no longer flagged as a router are
 * purged.  A surviving entry bumps gc_args->more so GC keeps running.
 * Caller holds rt6_exception_lock and rcu_read_lock_bh (for the
 * lockless neighbour lookup) - see fib6_nh_age_exceptions().
 */
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}
2094
/* Walk every exception bucket of @nh and apply the aging policy in
 * rt6_age_examine_exception() to each cached route.
 * Lock order: rcu_read_lock_bh (for neighbour lookups in the examine
 * helper) then rt6_exception_lock.
 */
static void fib6_nh_age_exceptions(const struct fib6_nh *nh,
				   struct fib6_gc_args *gc_args,
				   unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	/* fast path: no exception cache allocated for this nexthop */
	if (!rcu_access_pointer(nh->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = fib6_nh_get_excptn_bucket(nh, &rt6_exception_lock);
	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			/* _safe: examine helper may unlink entries */
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}
2123
/* Closure passed through nexthop_for_each_fib6_nh() so that the
 * per-fib6_nh callback (rt6_nh_age_exceptions) can reach the GC
 * arguments and the timestamp of this GC run.
 */
struct fib6_nh_age_excptn_arg {
	struct fib6_gc_args *gc_args;
	unsigned long now;
};
2128
2129static int rt6_nh_age_exceptions(struct fib6_nh *nh, void *_arg)
2130{
2131 struct fib6_nh_age_excptn_arg *arg = _arg;
2132
2133 fib6_nh_age_exceptions(nh, arg->gc_args, arg->now);
2134 return 0;
2135}
2136
/* Age the exception caches of @f6i. When the route uses a shared
 * nexthop object (f6i->nh), every fib6_nh in it is visited via the
 * nexthop walker; otherwise the route's own fib6_nh is aged directly.
 */
void rt6_age_exceptions(struct fib6_info *f6i,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	if (f6i->nh) {
		struct fib6_nh_age_excptn_arg arg = {
			.gc_args = gc_args,
			.now = now
		};

		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_age_exceptions,
					 &arg);
	} else {
		fib6_nh_age_exceptions(f6i->fib6_nh, gc_args, now);
	}
}
2153
/* must be called with rcu lock held */
/* Core FIB tree lookup: find the best route for @fl6 in @table and fill
 * @res. If a subtree match yields only the null entry, backtrack up the
 * tree; if still nothing and RT6_LOOKUP_F_REACHABLE was set, retry the
 * whole lookup from the original node without the reachability
 * restriction. Always returns 0; the result is in @res.
 */
int fib6_table_lookup(struct net *net, struct fib6_table *table, int oif,
		      struct flowi6 *fl6, struct fib6_result *res, int strict)
{
	struct fib6_node *fn, *saved_fn;

	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	/* caller asked to ignore the oif when selecting the nexthop */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt6_select(net, fn, oif, res, strict);
	if (res->f6i == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	trace_fib6_table_lookup(net, res, table, fl6);

	return 0;
}
2184
/* Policy-routing lookup entry point: resolve @fl6 in @table and return
 * a usable rt6_info. Resolution order after the FIB lookup:
 *  1. an existing cached exception route,
 *  2. a fresh RTF_CACHE clone for the FLOWI_FLAG_KNOWN_NH special case,
 *  3. the per-cpu copy of the matched route (created on demand).
 * Falls back to net->ipv6.ip6_null_entry on no match. A reference is
 * taken for the caller unless RT6_LOOKUP_F_DST_NOREF is set (in which
 * case the caller must already be inside an RCU read section, see the
 * WARN_ON_ONCE below); the uncached-clone path always returns a ref.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_result res = {};
	struct rt6_info *rt = NULL;
	int strict = 0;

	WARN_ON_ONCE((flags & RT6_LOOKUP_F_DST_NOREF) &&
		     !rcu_read_lock_held());

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	/* hosts (forwarding disabled) prefer reachable routers */
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fib6_table_lookup(net, table, oif, fl6, &res, strict);
	if (res.f6i == net->ipv6.fib6_null_entry)
		goto out;

	fib6_select_path(net, &res, fl6, oif, false, skb, strict);

	/*Search through exception table */
	rt = rt6_find_cached_rt(&res, &fl6->daddr, &fl6->saddr);
	if (rt) {
		goto out;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !res.nh->fib_nh_gw_family)) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		rt = ip6_rt_cache_alloc(&res, &fl6->daddr, NULL);

		if (rt) {
			/* 1 refcnt is taken during ip6_rt_cache_alloc().
			 * As rt6_uncached_list_add() does not consume refcnt,
			 * this refcnt is always returned to the caller even
			 * if caller sets RT6_LOOKUP_F_DST_NOREF flag.
			 */
			rt6_uncached_list_add(rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
			rcu_read_unlock();

			return rt;
		}
	} else {
		/* Get a percpu copy */
		local_bh_disable();
		rt = rt6_get_pcpu_route(&res);

		if (!rt)
			rt = rt6_make_pcpu_route(net, &res);

		local_bh_enable();
	}
out:
	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	if (!(flags & RT6_LOOKUP_F_DST_NOREF))
		ip6_hold_safe(net, &rt);
	rcu_read_unlock();

	return rt;
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
Thomas Grafc71099a2006-08-04 23:20:06 -07002254
David Ahernb75cc8f2018-03-02 08:32:17 -08002255static struct rt6_info *ip6_pol_route_input(struct net *net,
2256 struct fib6_table *table,
2257 struct flowi6 *fl6,
2258 const struct sk_buff *skb,
2259 int flags)
Pavel Emelyanov4acad722007-10-15 13:02:51 -07002260{
David Ahernb75cc8f2018-03-02 08:32:17 -08002261 return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
Pavel Emelyanov4acad722007-10-15 13:02:51 -07002262}
2263
Mahesh Bandeward409b842016-09-16 12:59:08 -07002264struct dst_entry *ip6_route_input_lookup(struct net *net,
2265 struct net_device *dev,
David Ahernb75cc8f2018-03-02 08:32:17 -08002266 struct flowi6 *fl6,
2267 const struct sk_buff *skb,
2268 int flags)
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00002269{
2270 if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
2271 flags |= RT6_LOOKUP_F_IFACE;
2272
David Ahernb75cc8f2018-03-02 08:32:17 -08002273 return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00002274}
Mahesh Bandeward409b842016-09-16 12:59:08 -07002275EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00002276
/* Fill @keys with the L3 fields used for multipath hashing of @skb.
 * For ICMPv6 error messages the embedded (inner) offending header is
 * hashed instead of the outer one, so that errors follow the same path
 * as the flow that triggered them; in that case any pre-dissected
 * @flkeys are ignored because they describe the outer packet.
 */
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;
	struct icmp6hdr _icmph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	/* skb_header_pointer() copies into the local buffer if the
	 * header is not linear; NULL means truncated packet
	 */
	icmph = skb_header_pointer(skb, skb_transport_offset(skb),
				   sizeof(_icmph), &_icmph);
	if (!icmph)
		goto out;

	/* only ICMPv6 *error* types carry an embedded packet */
	if (!icmpv6_is_err(icmph->icmp6_type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowlabel(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}
2321
/* if skb is set it will be used and fl6 can be NULL */
/* Compute the multipath hash for a flow according to the per-netns
 * fib_multipath_hash_policy:
 *   0 - L3 only (addresses, flow label, protocol; inner header for
 *       ICMPv6 errors via ip6_multipath_l3_keys()),
 *   1 - L4 five-tuple (reuses skb->hash when an L4 hash is present),
 *   2 - L3 of the inner packet when one can be dissected, else as 0.
 * The result is shifted right by one so 0 can serve as "no hash".
 */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			/* dissect only if the caller did not already */
			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			struct flow_keys keys;

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, 0);
				flkeys = &keys;
			}

			/* Inner can be v4 or v6 */
			if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			} else if (flkeys->control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
				hash_keys.tags.flow_label = flkeys->tags.flow_label;
				hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)flowi6_get_flowlabel(fl6);
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	/* reserve bit 31; hash value 0 means "not set" to callers */
	return mhash >> 1;
}
2414
/* Called with rcu held */
/* Resolve the route for an incoming packet and attach it to the skb.
 * Builds a flowi6 from the IPv6 header, folds in tunnel metadata and
 * (for ICMPv6) a multipath hash, then performs a noref lookup — the dst
 * is set on the skb without taking a reference, which is why the caller
 * must hold RCU.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_DST_NOREF;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	/* receive-side tunnel metadata keys the lookup by tunnel id */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	/* hash ICMPv6 here so errors follow the offending flow's path */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set_noref(skb, ip6_route_input_lookup(net, skb->dev,
						      &fl6, skb, flags));
}
2445
David Ahernb75cc8f2018-03-02 08:32:17 -08002446static struct rt6_info *ip6_pol_route_output(struct net *net,
2447 struct fib6_table *table,
2448 struct flowi6 *fl6,
2449 const struct sk_buff *skb,
2450 int flags)
Thomas Grafc71099a2006-08-04 23:20:06 -07002451{
David Ahernb75cc8f2018-03-02 08:32:17 -08002452 return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
Thomas Grafc71099a2006-08-04 23:20:06 -07002453}
2454
/* Output-path route lookup that does NOT take a reference on the
 * returned dst (RT6_LOOKUP_F_DST_NOREF is forced); the caller must hold
 * RCU, or use ip6_route_output_flags() which wraps this and takes the
 * reference. Multicast/link-local destinations are first offered to the
 * l3mdev (VRF) link-scope lookup.
 */
struct dst_entry *ip6_route_output_flags_noref(struct net *net,
					       const struct sock *sk,
					       struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (ipv6_addr_type(&fl6->daddr) &
	    (IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL)) {
		struct dst_entry *dst;

		/* This function does not take refcnt on the dst */
		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	/* locally generated traffic: iif is loopback */
	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	flags |= RT6_LOOKUP_F_DST_NOREF;
	any_src = ipv6_addr_any(&fl6->saddr);
	/* strict interface match when the socket is bound to a device,
	 * the destination requires it, or oif is set with no saddr yet
	 */
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		/* no saddr yet: honour the socket's source-prefs */
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags_noref);
2487
/* Reference-taking wrapper around ip6_route_output_flags_noref().
 * Runs the noref lookup under RCU, then pins the result: dsts on the
 * uncached list already carry a reference; for everything else a hold
 * is attempted, falling back to a held ip6_null_entry if the dst is
 * being torn down concurrently.
 */
struct dst_entry *ip6_route_output_flags(struct net *net,
					 const struct sock *sk,
					 struct flowi6 *fl6,
					 int flags)
{
	struct dst_entry *dst;
	struct rt6_info *rt6;

	rcu_read_lock();
	dst = ip6_route_output_flags_noref(net, sk, fl6, flags);
	rt6 = (struct rt6_info *)dst;
	/* For dst cached in uncached_list, refcnt is already taken. */
	if (list_empty(&rt6->rt6i_uncached) && !dst_hold_safe(dst)) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
	}
	rcu_read_unlock();

	return dst;
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
/* Replace @dst_orig with a blackhole dst that silently discards all
 * input and output, copying addressing, gateway, flags (minus RTF_PCPU)
 * and metrics from the original. The new dst is bound to the loopback
 * device. Consumes the reference on @dst_orig; returns the new dst or
 * ERR_PTR(-ENOMEM) on allocation failure.
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		/* both directions drop packets */
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}
David S. Miller14e50e52007-05-24 18:17:54 -07002542
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543/*
2544 * Destination cache support functions
2545 */
2546
David Ahern8d1c8022018-04-17 17:33:26 -07002547static bool fib6_check(struct fib6_info *f6i, u32 cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002548{
Steffen Klassert36143642017-08-25 09:05:42 +02002549 u32 rt_cookie = 0;
Wei Wangc5cff852017-08-21 09:47:10 -07002550
David Ahern8ae86972018-04-20 15:38:03 -07002551 if (!fib6_get_cookie_safe(f6i, &rt_cookie) || rt_cookie != cookie)
David Ahern93531c62018-04-17 17:33:25 -07002552 return false;
2553
2554 if (fib6_check_expired(f6i))
2555 return false;
2556
2557 return true;
2558}
2559
David Aherna68886a2018-04-20 15:38:02 -07002560static struct dst_entry *rt6_check(struct rt6_info *rt,
2561 struct fib6_info *from,
2562 u32 cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002563{
Wei Wangc5cff852017-08-21 09:47:10 -07002564 u32 rt_cookie = 0;
2565
David Ahern49d05fe2019-07-17 15:08:43 -07002566 if (!from || !fib6_get_cookie_safe(from, &rt_cookie) ||
David Ahern93531c62018-04-17 17:33:25 -07002567 rt_cookie != cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002568 return NULL;
2569
2570 if (rt6_check_expired(rt))
2571 return NULL;
2572
2573 return &rt->dst;
2574}
2575
David Aherna68886a2018-04-20 15:38:02 -07002576static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt,
2577 struct fib6_info *from,
2578 u32 cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002579{
Martin KaFai Lau5973fb12015-11-11 11:51:07 -08002580 if (!__rt6_check_expired(rt) &&
2581 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
David Aherna68886a2018-04-20 15:38:02 -07002582 fib6_check(from, cookie))
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002583 return &rt->dst;
2584 else
2585 return NULL;
2586}
2587
/* dst_ops.check handler: decide whether a cached dst may still be used.
 * Per-cpu and uncached clones are validated through their parent
 * fib6_info (rt6_dst_from_check); plain entries through rt6_check.
 * Returns the dst if still valid, NULL if the caller must re-resolve.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct dst_entry *dst_ret;
	struct fib6_info *from;
	struct rt6_info *rt;

	rt = container_of(dst, struct rt6_info, dst);

	rcu_read_lock();

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	from = rcu_dereference(rt->from);

	if (from && (rt->rt6i_flags & RTF_PCPU ||
	    unlikely(!list_empty(&rt->rt6i_uncached))))
		dst_ret = rt6_dst_from_check(rt, from, cookie);
	else
		dst_ret = rt6_check(rt, from, cookie);

	rcu_read_unlock();

	return dst_ret;
}
2615
/* dst_ops.negative_advice handler: called when the stack suspects this
 * dst is no longer useful. Expired RTF_CACHE entries are removed from
 * the exception table; non-cache dsts are simply released. Returning
 * NULL tells the caller to drop its reference to the dst.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			rcu_read_lock();
			if (rt6_check_expired(rt)) {
				rt6_remove_exception_rt(rt);
				dst = NULL;
			}
			rcu_read_unlock();
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
2635
/* dst_ops.link_failure handler: report the failure to the sender via an
 * ICMPv6 address-unreachable error, then invalidate the routing state —
 * cached exception routes are removed outright, while for default
 * routes the owning fib6_node's sernum is poisoned (-1) so cached dsts
 * referencing it fail their next ip6_dst_check().
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		rcu_read_lock();
		if (rt->rt6i_flags & RTF_CACHE) {
			rt6_remove_exception_rt(rt);
		} else {
			struct fib6_info *from;
			struct fib6_node *fn;

			from = rcu_dereference(rt->from);
			if (from) {
				fn = rcu_dereference(from->fib6_node);
				if (fn && (rt->rt6i_flags & RTF_DEFAULT))
					fn->fn_sernum = -1;
			}
		}
		rcu_read_unlock();
	}
}
2661
/* Arm (or re-arm) the expiry timer on a cached route. If the route was
 * not yet marked RTF_EXPIRES, first inherit the parent fib6_info's
 * expiry so dst_set_expires() below can only move it earlier, then set
 * the flag.
 */
static void rt6_update_expires(struct rt6_info *rt0, int timeout)
{
	if (!(rt0->rt6i_flags & RTF_EXPIRES)) {
		struct fib6_info *from;

		rcu_read_lock();
		from = rcu_dereference(rt0->from);
		if (from)
			rt0->dst.expires = from->expires;
		rcu_read_unlock();
	}

	dst_set_expires(&rt0->dst, timeout);
	rt0->rt6i_flags |= RTF_EXPIRES;
}
2677
/* Record a new path MTU on @rt: set the RTAX_MTU metric, flag the route
 * as modified, and schedule expiry after the netns' ip6_rt_mtu_expires
 * interval (PMTU information is temporary, per RFC 8201).
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
2686
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002687static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2688{
2689 return !(rt->rt6i_flags & RTF_CACHE) &&
Paolo Abeni1490ed22019-02-15 18:15:37 +01002690 (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->from));
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002691}
2692
/* Core PMTU update handler. Derives src/dst addresses from the IPv6
 * header or the socket, optionally confirms the neighbour, and — if the
 * new MTU (clamped to IPV6_MIN_MTU) actually shrinks the path MTU —
 * either updates @dst in place or, for routes that may be cached,
 * creates an RTF_CACHE exception clone carrying the new MTU. A clone
 * that cannot be inserted into the exception table is released
 * immediately.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu,
				 bool confirm_neigh)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	/* administratively locked MTU must not be changed by PMTU */
	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}

	if (confirm_neigh)
		dst_confirm_neigh(dst, daddr);

	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	/* only a smaller MTU is a PMTU event */
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct fib6_result res = {};
		struct rt6_info *nrt6;

		rcu_read_lock();
		res.f6i = rcu_dereference(rt6->from);
		if (!res.f6i)
			goto out_unlock;

		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;

		if (res.f6i->nh) {
			/* shared nexthop object: find the fib6_nh that
			 * matches this dst's device and gateway
			 */
			struct fib6_nh_match_arg arg = {
				.dev = dst->dev,
				.gw = &rt6->rt6i_gateway,
			};

			nexthop_for_each_fib6_nh(res.f6i->nh,
						 fib6_nh_find_match, &arg);

			/* fib6_info uses a nexthop that does not have fib6_nh
			 * using the dst->dev + gw. Should be impossible.
			 */
			if (!arg.match)
				goto out_unlock;

			res.nh = arg.match;
		} else {
			res.nh = res.f6i->fib6_nh;
		}

		nrt6 = ip6_rt_cache_alloc(&res, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			if (rt6_insert_exception(nrt6, &res))
				dst_release_immediate(&nrt6->dst);
		}
out_unlock:
		rcu_read_unlock();
	}
}
2768
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002769static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
Hangbin Liubd085ef2019-12-22 10:51:09 +08002770 struct sk_buff *skb, u32 mtu,
2771 bool confirm_neigh)
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002772{
Hangbin Liubd085ef2019-12-22 10:51:09 +08002773 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu,
2774 confirm_neigh);
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002775}
2776
David S. Miller42ae66c2012-06-15 20:01:57 -07002777void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002778 int oif, u32 mark, kuid_t uid)
David S. Miller81aded22012-06-15 14:54:11 -07002779{
2780 const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
2781 struct dst_entry *dst;
Maciej Żenczykowskidc920952018-09-29 23:44:51 -07002782 struct flowi6 fl6 = {
2783 .flowi6_oif = oif,
2784 .flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
2785 .daddr = iph->daddr,
2786 .saddr = iph->saddr,
2787 .flowlabel = ip6_flowinfo(iph),
2788 .flowi6_uid = uid,
2789 };
David S. Miller81aded22012-06-15 14:54:11 -07002790
2791 dst = ip6_route_output(net, NULL, &fl6);
2792 if (!dst->error)
Hangbin Liubd085ef2019-12-22 10:51:09 +08002793 __ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu), true);
David S. Miller81aded22012-06-15 14:54:11 -07002794 dst_release(dst);
2795}
2796EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2797
/* Socket variant of ip6_update_pmtu(): resolve the output interface
 * (using the L3 master device when the socket is unbound and the skb
 * arrived on an enslaved device), update the route's PMTU, then refresh
 * the socket's cached dst if the update made it obsolete.
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	int oif = sk->sk_bound_dev_if;
	struct dst_entry *dst;

	if (!oif && skb->dev)
		oif = l3mdev_master_ifindex(skb->dev);

	ip6_update_pmtu(skb, sock_net(sk), mtu, oif, sk->sk_mark, sk->sk_uid);

	/* nothing more to do unless the socket caches a dst that is now
	 * obsolete and fails its validity check
	 */
	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	/* only re-resolve when the socket is not owned by user context and
	 * is a true IPv6 socket (not IPv4-mapped)
	 */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2819
/* Store @dst in the socket's cache via ip6_dst_store().  The daddr
 * (and, with subtrees enabled, saddr) pointers are passed only when the
 * flow's addresses match the socket's, so the cookie/address pinning in
 * ip6_dst_store() reflects the actual connected addresses.
 */
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}
2836
/* Check whether nexthop res->nh can have sent the redirect from gateway
 * @gw for flow @fl6.  Returns true on a match; when the match comes from
 * a cached exception route, that route is returned through @ret.
 */
static bool ip6_redirect_nh_match(const struct fib6_result *res,
				  struct flowi6 *fl6,
				  const struct in6_addr *gw,
				  struct rt6_info **ret)
{
	const struct fib6_nh *nh = res->nh;

	/* nexthop must be alive, have a gateway, and use the flow's oif */
	if (nh->fib_nh_flags & RTNH_F_DEAD || !nh->fib_nh_gw_family ||
	    fl6->flowi6_oif != nh->fib_nh_dev->ifindex)
		return false;

	/* rt_cache's gateway might be different from its 'parent'
	 * in the case of an ip redirect.
	 * So we keep searching in the exception table if the gateway
	 * is different.
	 */
	if (!ipv6_addr_equal(gw, &nh->fib_nh_gw6)) {
		struct rt6_info *rt_cache;

		rt_cache = rt6_find_cached_rt(res, &fl6->daddr, &fl6->saddr);
		if (rt_cache &&
		    ipv6_addr_equal(gw, &rt_cache->rt6i_gateway)) {
			*ret = rt_cache;
			return true;
		}
		return false;
	}
	return true;
}
2866
/* Argument bundle for fib6_nh_redirect_match() when iterating the
 * fib6_nh entries of a nexthop object during redirect processing.
 */
struct fib6_nh_rd_arg {
	struct fib6_result *res;	/* lookup result; ->nh set per iteration */
	struct flowi6 *fl6;		/* flow being redirected */
	const struct in6_addr *gw;	/* gateway the redirect came from */
	struct rt6_info **ret;		/* out: matching cached route, if any */
};
2873
2874static int fib6_nh_redirect_match(struct fib6_nh *nh, void *_arg)
2875{
2876 struct fib6_nh_rd_arg *arg = _arg;
2877
2878 arg->res->nh = nh;
2879 return ip6_redirect_nh_match(arg->res, arg->fl6, arg->gw, arg->ret);
2880}
2881
/* Handle redirects */
/* Extended flow key for redirect lookups: fl6 must stay the first member
 * so a struct flowi6 * can be cast back to ip6rd_flowi to recover the
 * redirecting gateway (see __ip6_route_redirect()).
 */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;	/* router that sent the redirect */
};
2887
/* Lookup helper for redirect processing: find the route whose nexthop
 * matches the redirecting gateway carried inside @fl6 (really an
 * ip6rd_flowi).  Walks the FIB tree under RCU, backtracking towards
 * less-specific nodes when needed.  Returns a held rt6_info: a matched
 * cached route, the null entry for rejects, or a fresh rcu clone.
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *ret = NULL;
	struct fib6_result res = {};
	struct fib6_nh_rd_arg arg = {
		.res = &res,
		.fl6 = fl6,
		.gw = &rdfl->gateway,
		.ret = &ret
	};
	struct fib6_info *rt;
	struct fib6_node *fn;

	/* l3mdev_update_flow overrides oif if the device is enslaved; in
	 * this case we must match on the real ingress device, so reset it
	 */
	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		fl6->flowi6_oif = skb->dev->ifindex;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_node_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		res.f6i = rt;
		if (fib6_check_expired(rt))
			continue;
		if (rt->fib6_flags & RTF_REJECT)
			break;
		if (unlikely(rt->nh)) {
			if (nexthop_is_blackhole(rt->nh))
				continue;
			/* on match, res->nh is filled in and potentially ret */
			if (nexthop_for_each_fib6_nh(rt->nh,
						     fib6_nh_redirect_match,
						     &arg))
				goto out;
		} else {
			res.nh = rt->fib6_nh;
			if (ip6_redirect_nh_match(&res, fl6, &rdfl->gateway,
						  &ret))
				goto out;
		}
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->fib6_flags & RTF_REJECT) {
		ret = net->ipv6.ip6_null_entry;
		goto out;
	}

	/* no usable route at this node: backtrack to a less-specific one */
	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

	res.f6i = rt;
	res.nh = rt->fib6_nh;
out:
	if (ret) {
		/* matched a cached route (or the null entry): take a ref */
		ip6_hold_safe(net, &ret);
	} else {
		res.fib6_flags = res.f6i->fib6_flags;
		res.fib6_type = res.f6i->fib6_type;
		ret = ip6_create_rt_rcu(&res);
	}

	rcu_read_unlock();

	trace_fib6_table_lookup(net, &res, table, fl6);
	return ret;
};
2976
2977static struct dst_entry *ip6_route_redirect(struct net *net,
David Ahernb75cc8f2018-03-02 08:32:17 -08002978 const struct flowi6 *fl6,
2979 const struct sk_buff *skb,
2980 const struct in6_addr *gateway)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002981{
2982 int flags = RT6_LOOKUP_F_HAS_SADDR;
2983 struct ip6rd_flowi rdfl;
2984
2985 rdfl.fl6 = *fl6;
2986 rdfl.gateway = *gateway;
2987
David Ahernb75cc8f2018-03-02 08:32:17 -08002988 return fib6_rule_lookup(net, &rdfl.fl6, skb,
Duan Jiongb55b76b2013-09-04 19:44:21 +08002989 flags, __ip6_route_redirect);
2990}
2991
/* Apply an ICMPv6 redirect for the flow described by the IPv6 header at
 * skb->data: look up the affected route and retarget its gateway via
 * rt6_do_redirect().
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);
3012
/* Handle a redirect whose ICMP payload lacks the offending packet's
 * header: take the destination from the rd_msg target and use the outer
 * header's daddr (our address) as the flow source.
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.daddr = msg->dest,
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
3030
David S. Miller3a5ad2e2012-07-12 00:08:07 -07003031void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
3032{
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09003033 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
3034 sk->sk_uid);
David S. Miller3a5ad2e2012-07-12 00:08:07 -07003035}
3036EXPORT_SYMBOL_GPL(ip6_sk_redirect);
3037
/* dst_ops->default_advmss hook: derive the advertised MSS from the dst
 * MTU minus IPv6 + TCP header sizes, clamped between the
 * ip6_rt_min_advmss sysctl and IPV6_MAXPLEN.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
3059
/* dst_ops->mtu hook: prefer an explicit RTAX_MTU metric; otherwise use
 * the egress device's mtu6 (IPV6_MIN_MTU when no inet6_dev).  The result
 * is capped at IP6_MAX_MTU and reduced by any lwtunnel encap headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
3082
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 *
 * based on ip6_dst_mtu_forward and exception logic of
 * rt6_find_cached_rt; called with rcu_read_lock
 */
u32 ip6_mtu_from_fib6(const struct fib6_result *res,
		      const struct in6_addr *daddr,
		      const struct in6_addr *saddr)
{
	const struct fib6_nh *nh = res->nh;
	struct fib6_info *f6i = res->f6i;
	struct inet6_dev *idev;
	struct rt6_info *rt;
	u32 mtu = 0;

	/* case 1: route carries a locked MTU metric */
	if (unlikely(fib6_metric_locked(f6i, RTAX_MTU))) {
		mtu = f6i->fib6_pmtu;
		if (mtu)
			goto out;
	}

	/* case 2: PMTU-discovered exception for this daddr/saddr */
	rt = rt6_find_cached_rt(res, daddr, saddr);
	if (unlikely(rt)) {
		mtu = dst_metric_raw(&rt->dst, RTAX_MTU);
	} else {
		/* case 3: egress device MTU (min IPV6_MIN_MTU) */
		struct net_device *dev = nh->fib_nh_dev;

		mtu = IPV6_MIN_MTU;
		idev = __in6_dev_get(dev);
		if (idev && idev->cnf.mtu6 > mtu)
			mtu = idev->cnf.mtu6;
	}

	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);
out:
	return mtu - lwtunnel_headroom(nh->fib_nh_lws, mtu);
}
3123
/* Allocate an uncached dst for sending an ICMPv6 message towards
 * fl6->daddr via @dev.  The route is placed on the uncached list (so
 * device teardown can release it) and passed through xfrm_lookup().
 * Returns the dst or an ERR_PTR on allocation failure.
 * Note: on success the idev reference is transferred to rt->rt6i_idev.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);	/* drop the ref taken above */
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;	/* host route to the ICMP target */
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}
3161
/* dst_ops->gc hook: run fib6 garbage collection when the dst entry
 * count exceeds ip6_rt_max_size or enough time has passed since the
 * last run.  ip6_rt_gc_expire grows on each pass and decays by the
 * elasticity shift, making GC progressively more aggressive under
 * sustained pressure.  Returns non-zero if still over the limit.
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	/* decay the aggressiveness counter */
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}
3186
/* Look up @gw_addr in table @tbid on behalf of nexthop validation.
 * Ignores link state and, when a real match is found, resolves the
 * multipath selection into @res.  Returns 0 on lookup success or a
 * negative errno (-EINVAL if the table does not exist).
 */
static int ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg,
			       const struct in6_addr *gw_addr, u32 tbid,
			       int flags, struct fib6_result *res)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	int err;

	table = fib6_get_table(net, tbid);
	if (!table)
		return -EINVAL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;

	err = fib6_table_lookup(net, table, cfg->fc_ifindex, &fl6, res, flags);
	if (!err && res->f6i != net->ipv6.fib6_null_entry)
		fib6_select_path(net, res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);

	return err;
}
3215
/* Validate an RTNH_F_ONLINK nexthop: the gateway must not resolve to a
 * reject route or a unicast route through a different device.  A match
 * on the default route is ignored.  Returns 0 if acceptable, -EINVAL
 * (with extack message) otherwise.
 */
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct fib6_result res = {};
	int err;

	err = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0, &res);
	if (!err && !(res.fib6_flags & RTF_REJECT) &&
	    /* ignore match if it is the default route */
	    !ipv6_addr_any(&res.f6i->fib6_dst.addr) &&
	    (res.fib6_type != RTN_UNICAST || dev != res.nh->fib_nh_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Nexthop has invalid gateway or device mismatch");
		err = -EINVAL;
	}

	return err;
}
3238
/* Validate a (non-onlink) gateway nexthop by resolving @gw_addr: first
 * in the route's own table (if cfg->fc_table is set), then via the full
 * policy lookup.  On success, fills in *_dev/*idev (taking references)
 * when the caller did not specify a device, or verifies the specified
 * device matches.  Returns 0 or -EHOSTUNREACH.
 */
static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	int flags = RT6_LOOKUP_F_IFACE;
	struct fib6_result res = {};
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		err = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags, &res);
		/* gw_addr can not require a gateway or resolve to a reject
		 * route. If a device is given, it must match the result.
		 */
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family ||
		    (dev && dev != res.nh->fib_nh_dev))
			err = -EHOSTUNREACH;
	}

	/* table-scoped lookup failed (or no table): try full lookup */
	if (err < 0) {
		struct flowi6 fl6 = {
			.flowi6_oif = cfg->fc_ifindex,
			.daddr = *gw_addr,
		};

		err = fib6_lookup(net, cfg->fc_ifindex, &fl6, &res, flags);
		if (err || res.fib6_flags & RTF_REJECT ||
		    res.nh->fib_nh_gw_family)
			err = -EHOSTUNREACH;

		if (err)
			return err;

		fib6_select_path(net, &res, &fl6, cfg->fc_ifindex,
				 cfg->fc_ifindex != 0, NULL, flags);
	}

	err = 0;
	if (dev) {
		if (dev != res.nh->fib_nh_dev)
			err = -EHOSTUNREACH;
	} else {
		/* resolve device/idev from the lookup result; refs are
		 * transferred to the caller
		 */
		*_dev = dev = res.nh->fib_nh_dev;
		dev_hold(dev);
		*idev = in6_dev_get(dev);
	}

	return err;
}
3292
/* Validate the gateway of a new route (cfg->fc_gateway): it must not be
 * a local address, must be link-local or a unicast/IPv4-mapped address,
 * and must be reachable per the onlink/regular nexthop checks.  May
 * resolve *_dev/*idev when not provided by the caller.  Returns 0 or a
 * negative errno with an extack message.
 */
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	/* link-local gateways are checked across all devices */
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using not link-local
		 * addresses as nexthop address.
		 * Otherwise, router will not able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		rcu_read_lock();

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		rcu_read_unlock();

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}
3369
David Ahern83c442512019-03-27 20:53:50 -07003370static bool fib6_is_reject(u32 flags, struct net_device *dev, int addr_type)
3371{
3372 if ((flags & RTF_REJECT) ||
3373 (dev && (dev->flags & IFF_LOOPBACK) &&
3374 !(addr_type & IPV6_ADDR_LOOPBACK) &&
3375 !(flags & RTF_LOCAL)))
3376 return true;
3377
3378 return false;
3379}
3380
/* Initialize @fib6_nh from the route configuration @cfg: resolve and
 * validate the egress device and gateway, set nexthop flags (ONLINK,
 * LINKDOWN), initialize encap state, and allocate the per-cpu route
 * cache.  On error, any partially-initialized lwtunnel state is
 * released and a negative errno is returned (with extack where set).
 */
int fib6_nh_init(struct net *net, struct fib6_nh *fib6_nh,
		 struct fib6_config *cfg, gfp_t gfp_flags,
		 struct netlink_ext_ack *extack)
{
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	int addr_type;
	int err;

	fib6_nh->fib_nh_family = AF_INET6;
#ifdef CONFIG_IPV6_ROUTER_PREF
	fib6_nh->last_probe = jiffies;
#endif

	err = -ENODEV;
	if (cfg->fc_ifindex) {
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}

		fib6_nh->fib_nh_flags |= RTNH_F_ONLINK;
	}

	fib6_nh->fib_nh_weight = 1;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	addr_type = ipv6_addr_type(&cfg->fc_dst);
	if (fib6_is_reject(cfg->fc_flags, dev, addr_type)) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		/* reject routes skip gateway/encap validation */
		goto pcpu_alloc;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
			goto out;

		fib6_nh->fib_nh_gw6 = cfg->fc_gateway;
		fib6_nh->fib_nh_gw_family = AF_INET6;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

	if (!(dev->flags & IFF_UP) && !cfg->fc_ignore_dev_down) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

	if (!(cfg->fc_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;

	err = fib_nh_common_init(net, &fib6_nh->nh_common, cfg->fc_encap,
				 cfg->fc_encap_type, cfg, gfp_flags, extack);
	if (err)
		goto out;

pcpu_alloc:
	fib6_nh->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, gfp_flags);
	if (!fib6_nh->rt6i_pcpu) {
		err = -ENOMEM;
		goto out;
	}

	fib6_nh->fib_nh_dev = dev;
	fib6_nh->fib_nh_oif = dev->ifindex;
	err = 0;
out:
	if (idev)
		in6_dev_put(idev);

	if (err) {
		/* undo any lwtunnel state set by fib_nh_common_init */
		lwtstate_put(fib6_nh->fib_nh_lws);
		fib6_nh->fib_nh_lws = NULL;
		if (dev)
			dev_put(dev);
	}

	return err;
}
3502
David Aherndac7d0f2019-03-27 20:53:51 -07003503void fib6_nh_release(struct fib6_nh *fib6_nh)
3504{
David Aherncc5c0732019-05-22 20:27:58 -07003505 struct rt6_exception_bucket *bucket;
3506
3507 rcu_read_lock();
3508
3509 fib6_nh_flush_exceptions(fib6_nh, NULL);
3510 bucket = fib6_nh_get_excptn_bucket(fib6_nh, NULL);
3511 if (bucket) {
3512 rcu_assign_pointer(fib6_nh->rt6i_exception_bucket, NULL);
3513 kfree(bucket);
3514 }
3515
3516 rcu_read_unlock();
3517
David Ahernf40b6ae2019-05-22 20:27:55 -07003518 if (fib6_nh->rt6i_pcpu) {
3519 int cpu;
3520
3521 for_each_possible_cpu(cpu) {
3522 struct rt6_info **ppcpu_rt;
3523 struct rt6_info *pcpu_rt;
3524
3525 ppcpu_rt = per_cpu_ptr(fib6_nh->rt6i_pcpu, cpu);
3526 pcpu_rt = *ppcpu_rt;
3527 if (pcpu_rt) {
3528 dst_dev_put(&pcpu_rt->dst);
3529 dst_release(&pcpu_rt->dst);
3530 *ppcpu_rt = NULL;
3531 }
3532 }
3533
3534 free_percpu(fib6_nh->rt6i_pcpu);
3535 }
3536
David Ahern979e2762019-03-27 20:53:58 -07003537 fib_nh_common_release(&fib6_nh->nh_common);
David Aherndac7d0f2019-03-27 20:53:51 -07003538}
3539
David Ahern8d1c8022018-04-17 17:33:26 -07003540static struct fib6_info *ip6_route_info_create(struct fib6_config *cfg,
David Ahernacb54e32018-04-17 17:33:22 -07003541 gfp_t gfp_flags,
David Ahern333c4302017-05-21 10:12:04 -06003542 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003543{
Daniel Lezcano55786892008-03-04 13:47:47 -08003544 struct net *net = cfg->fc_nlinfo.nl_net;
David Ahern8d1c8022018-04-17 17:33:26 -07003545 struct fib6_info *rt = NULL;
David Ahernf88d8ea2019-06-03 20:19:52 -07003546 struct nexthop *nh = NULL;
Thomas Grafc71099a2006-08-04 23:20:06 -07003547 struct fib6_table *table;
David Ahernf88d8ea2019-06-03 20:19:52 -07003548 struct fib6_nh *fib6_nh;
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003549 int err = -EINVAL;
David Ahern83c442512019-03-27 20:53:50 -07003550 int addr_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003551
David Ahern557c44b2017-04-19 14:19:43 -07003552 /* RTF_PCPU is an internal flag; can not be set by userspace */
David Ahernd5d531c2017-05-21 10:12:05 -06003553 if (cfg->fc_flags & RTF_PCPU) {
3554 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
David Ahern557c44b2017-04-19 14:19:43 -07003555 goto out;
David Ahernd5d531c2017-05-21 10:12:05 -06003556 }
David Ahern557c44b2017-04-19 14:19:43 -07003557
Wei Wang2ea23522017-10-27 17:30:12 -07003558 /* RTF_CACHE is an internal flag; can not be set by userspace */
3559 if (cfg->fc_flags & RTF_CACHE) {
3560 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
3561 goto out;
3562 }
3563
David Aherne8478e82018-04-17 17:33:13 -07003564 if (cfg->fc_type > RTN_MAX) {
3565 NL_SET_ERR_MSG(extack, "Invalid route type");
3566 goto out;
3567 }
3568
David Ahernd5d531c2017-05-21 10:12:05 -06003569 if (cfg->fc_dst_len > 128) {
3570 NL_SET_ERR_MSG(extack, "Invalid prefix length");
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003571 goto out;
David Ahernd5d531c2017-05-21 10:12:05 -06003572 }
3573 if (cfg->fc_src_len > 128) {
3574 NL_SET_ERR_MSG(extack, "Invalid source address length");
3575 goto out;
3576 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003577#ifndef CONFIG_IPV6_SUBTREES
David Ahernd5d531c2017-05-21 10:12:05 -06003578 if (cfg->fc_src_len) {
3579 NL_SET_ERR_MSG(extack,
3580 "Specifying source address requires IPV6_SUBTREES to be enabled");
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003581 goto out;
David Ahernd5d531c2017-05-21 10:12:05 -06003582 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583#endif
David Ahern5b983242019-06-08 14:53:34 -07003584 if (cfg->fc_nh_id) {
3585 nh = nexthop_find_by_id(net, cfg->fc_nh_id);
3586 if (!nh) {
3587 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
3588 goto out;
3589 }
3590 err = fib6_check_nexthop(nh, cfg, extack);
3591 if (err)
3592 goto out;
3593 }
David Ahernfc1e64e2018-01-25 16:55:09 -08003594
Matti Vaittinend71314b2011-11-14 00:14:49 +00003595 err = -ENOBUFS;
David S. Miller38308472011-12-03 18:02:47 -05003596 if (cfg->fc_nlinfo.nlh &&
3597 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
Matti Vaittinend71314b2011-11-14 00:14:49 +00003598 table = fib6_get_table(net, cfg->fc_table);
David S. Miller38308472011-12-03 18:02:47 -05003599 if (!table) {
Joe Perchesf3213832012-05-15 14:11:53 +00003600 pr_warn("NLM_F_CREATE should be specified when creating new route\n");
Matti Vaittinend71314b2011-11-14 00:14:49 +00003601 table = fib6_new_table(net, cfg->fc_table);
3602 }
3603 } else {
3604 table = fib6_new_table(net, cfg->fc_table);
3605 }
David S. Miller38308472011-12-03 18:02:47 -05003606
3607 if (!table)
Thomas Grafc71099a2006-08-04 23:20:06 -07003608 goto out;
Thomas Grafc71099a2006-08-04 23:20:06 -07003609
David Ahern93531c62018-04-17 17:33:25 -07003610 err = -ENOMEM;
David Ahernf88d8ea2019-06-03 20:19:52 -07003611 rt = fib6_info_alloc(gfp_flags, !nh);
David Ahern93531c62018-04-17 17:33:25 -07003612 if (!rt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003613 goto out;
David Ahern93531c62018-04-17 17:33:25 -07003614
David Ahernd7e774f2018-11-06 12:51:15 -08003615 rt->fib6_metrics = ip_fib_metrics_init(net, cfg->fc_mx, cfg->fc_mx_len,
3616 extack);
David Ahern767a2212018-10-04 20:07:51 -07003617 if (IS_ERR(rt->fib6_metrics)) {
3618 err = PTR_ERR(rt->fib6_metrics);
Eric Dumazetfda21d42018-10-05 09:17:50 -07003619 /* Do not leave garbage there. */
3620 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
David Ahern767a2212018-10-04 20:07:51 -07003621 goto out;
3622 }
3623
David Ahern93531c62018-04-17 17:33:25 -07003624 if (cfg->fc_flags & RTF_ADDRCONF)
3625 rt->dst_nocount = true;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003626
Gao feng1716a962012-04-06 00:13:10 +00003627 if (cfg->fc_flags & RTF_EXPIRES)
David Ahern14895682018-04-17 17:33:17 -07003628 fib6_set_expires(rt, jiffies +
Gao feng1716a962012-04-06 00:13:10 +00003629 clock_t_to_jiffies(cfg->fc_expires));
3630 else
David Ahern14895682018-04-17 17:33:17 -07003631 fib6_clean_expires(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003632
Thomas Graf86872cb2006-08-22 00:01:08 -07003633 if (cfg->fc_protocol == RTPROT_UNSPEC)
3634 cfg->fc_protocol = RTPROT_BOOT;
David Ahern93c2fb22018-04-18 15:38:59 -07003635 rt->fib6_protocol = cfg->fc_protocol;
Thomas Graf86872cb2006-08-22 00:01:08 -07003636
David Ahern83c442512019-03-27 20:53:50 -07003637 rt->fib6_table = table;
3638 rt->fib6_metric = cfg->fc_metric;
David Ahernc7036d92019-06-19 10:50:24 -07003639 rt->fib6_type = cfg->fc_type ? : RTN_UNICAST;
David Ahern2b2450c2019-03-27 20:53:52 -07003640 rt->fib6_flags = cfg->fc_flags & ~RTF_GATEWAY;
Roopa Prabhu19e42e42015-07-21 10:43:48 +02003641
David Ahern93c2fb22018-04-18 15:38:59 -07003642 ipv6_addr_prefix(&rt->fib6_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
3643 rt->fib6_dst.plen = cfg->fc_dst_len;
Michal Kubečeke5fd3872014-03-27 13:04:08 +01003644
Linus Torvalds1da177e2005-04-16 15:20:36 -07003645#ifdef CONFIG_IPV6_SUBTREES
David Ahern93c2fb22018-04-18 15:38:59 -07003646 ipv6_addr_prefix(&rt->fib6_src.addr, &cfg->fc_src, cfg->fc_src_len);
3647 rt->fib6_src.plen = cfg->fc_src_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003648#endif
David Ahernf88d8ea2019-06-03 20:19:52 -07003649 if (nh) {
3650 if (!nexthop_get(nh)) {
3651 NL_SET_ERR_MSG(extack, "Nexthop has been deleted");
3652 goto out;
3653 }
3654 if (rt->fib6_src.plen) {
Colin Ian King4daa95a2019-06-06 09:40:39 +01003655 NL_SET_ERR_MSG(extack, "Nexthops can not be used with source routing");
David Ahernf88d8ea2019-06-03 20:19:52 -07003656 goto out;
3657 }
3658 rt->nh = nh;
3659 fib6_nh = nexthop_fib6_nh(rt->nh);
3660 } else {
3661 err = fib6_nh_init(net, rt->fib6_nh, cfg, gfp_flags, extack);
3662 if (err)
3663 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664
David Ahernf88d8ea2019-06-03 20:19:52 -07003665 fib6_nh = rt->fib6_nh;
3666
3667 /* We cannot add true routes via loopback here, they would
3668 * result in kernel looping; promote them to reject routes
3669 */
3670 addr_type = ipv6_addr_type(&cfg->fc_dst);
3671 if (fib6_is_reject(cfg->fc_flags, rt->fib6_nh->fib_nh_dev,
3672 addr_type))
3673 rt->fib6_flags = RTF_REJECT | RTF_NONEXTHOP;
3674 }
David Ahern955ec4c2018-01-24 19:45:29 -08003675
Daniel Walterc3968a82011-04-13 21:10:57 +00003676 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
David Ahernf88d8ea2019-06-03 20:19:52 -07003677 struct net_device *dev = fib6_nh->fib_nh_dev;
David Ahern83c442512019-03-27 20:53:50 -07003678
Daniel Walterc3968a82011-04-13 21:10:57 +00003679 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
David Ahernd5d531c2017-05-21 10:12:05 -06003680 NL_SET_ERR_MSG(extack, "Invalid source address");
Daniel Walterc3968a82011-04-13 21:10:57 +00003681 err = -EINVAL;
3682 goto out;
3683 }
David Ahern93c2fb22018-04-18 15:38:59 -07003684 rt->fib6_prefsrc.addr = cfg->fc_prefsrc;
3685 rt->fib6_prefsrc.plen = 128;
Daniel Walterc3968a82011-04-13 21:10:57 +00003686 } else
David Ahern93c2fb22018-04-18 15:38:59 -07003687 rt->fib6_prefsrc.plen = 0;
Daniel Walterc3968a82011-04-13 21:10:57 +00003688
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003689 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003690out:
David Ahern93531c62018-04-17 17:33:25 -07003691 fib6_info_release(rt);
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003692 return ERR_PTR(err);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003693}
3694
David Ahernacb54e32018-04-17 17:33:22 -07003695int ip6_route_add(struct fib6_config *cfg, gfp_t gfp_flags,
David Ahern333c4302017-05-21 10:12:04 -06003696 struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003697{
David Ahern8d1c8022018-04-17 17:33:26 -07003698 struct fib6_info *rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003699 int err;
3700
David Ahernacb54e32018-04-17 17:33:22 -07003701 rt = ip6_route_info_create(cfg, gfp_flags, extack);
David Ahernd4ead6b2018-04-17 17:33:16 -07003702 if (IS_ERR(rt))
3703 return PTR_ERR(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003704
David Ahernd4ead6b2018-04-17 17:33:16 -07003705 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
David Ahern93531c62018-04-17 17:33:25 -07003706 fib6_info_release(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003707
Linus Torvalds1da177e2005-04-16 15:20:36 -07003708 return err;
3709}
3710
David Ahern8d1c8022018-04-17 17:33:26 -07003711static int __ip6_del_rt(struct fib6_info *rt, struct nl_info *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003712{
David Ahernafb1d4b52018-04-17 17:33:11 -07003713 struct net *net = info->nl_net;
Thomas Grafc71099a2006-08-04 23:20:06 -07003714 struct fib6_table *table;
David Ahernafb1d4b52018-04-17 17:33:11 -07003715 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003716
David Ahern421842e2018-04-17 17:33:18 -07003717 if (rt == net->ipv6.fib6_null_entry) {
Gao feng6825a262012-09-19 19:25:34 +00003718 err = -ENOENT;
3719 goto out;
3720 }
Patrick McHardy6c813a72006-08-06 22:22:47 -07003721
David Ahern93c2fb22018-04-18 15:38:59 -07003722 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003723 spin_lock_bh(&table->tb6_lock);
Thomas Graf86872cb2006-08-22 00:01:08 -07003724 err = fib6_del(rt, info);
Wei Wang66f5d6c2017-10-06 12:06:10 -07003725 spin_unlock_bh(&table->tb6_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003726
Gao feng6825a262012-09-19 19:25:34 +00003727out:
David Ahern93531c62018-04-17 17:33:25 -07003728 fib6_info_release(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729 return err;
3730}
3731
Roopa Prabhu11dd74b2020-04-27 13:56:45 -07003732int ip6_del_rt(struct net *net, struct fib6_info *rt, bool skip_notify)
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003733{
Roopa Prabhu11dd74b2020-04-27 13:56:45 -07003734 struct nl_info info = {
3735 .nl_net = net,
3736 .skip_notify = skip_notify
3737 };
David Ahernafb1d4b52018-04-17 17:33:11 -07003738
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08003739 return __ip6_del_rt(rt, &info);
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003740}
3741
David Ahern8d1c8022018-04-17 17:33:26 -07003742static int __ip6_del_rt_siblings(struct fib6_info *rt, struct fib6_config *cfg)
David Ahern0ae81332017-02-02 12:37:08 -08003743{
3744 struct nl_info *info = &cfg->fc_nlinfo;
WANG Conge3330032017-02-27 16:07:43 -08003745 struct net *net = info->nl_net;
David Ahern16a16cd2017-02-02 12:37:11 -08003746 struct sk_buff *skb = NULL;
David Ahern0ae81332017-02-02 12:37:08 -08003747 struct fib6_table *table;
WANG Conge3330032017-02-27 16:07:43 -08003748 int err = -ENOENT;
David Ahern0ae81332017-02-02 12:37:08 -08003749
David Ahern421842e2018-04-17 17:33:18 -07003750 if (rt == net->ipv6.fib6_null_entry)
WANG Conge3330032017-02-27 16:07:43 -08003751 goto out_put;
David Ahern93c2fb22018-04-18 15:38:59 -07003752 table = rt->fib6_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003753 spin_lock_bh(&table->tb6_lock);
David Ahern0ae81332017-02-02 12:37:08 -08003754
David Ahern93c2fb22018-04-18 15:38:59 -07003755 if (rt->fib6_nsiblings && cfg->fc_delete_all_nh) {
David Ahern8d1c8022018-04-17 17:33:26 -07003756 struct fib6_info *sibling, *next_sibling;
Ido Schimmel02846962019-12-23 15:28:18 +02003757 struct fib6_node *fn;
David Ahern0ae81332017-02-02 12:37:08 -08003758
David Ahern16a16cd2017-02-02 12:37:11 -08003759 /* prefer to send a single notification with all hops */
3760 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3761 if (skb) {
3762 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3763
David Ahernd4ead6b2018-04-17 17:33:16 -07003764 if (rt6_fill_node(net, skb, rt, NULL,
David Ahern16a16cd2017-02-02 12:37:11 -08003765 NULL, NULL, 0, RTM_DELROUTE,
3766 info->portid, seq, 0) < 0) {
3767 kfree_skb(skb);
3768 skb = NULL;
3769 } else
3770 info->skip_notify = 1;
3771 }
3772
Ido Schimmel02846962019-12-23 15:28:18 +02003773 /* 'rt' points to the first sibling route. If it is not the
3774 * leaf, then we do not need to send a notification. Otherwise,
3775 * we need to check if the last sibling has a next route or not
3776 * and emit a replace or delete notification, respectively.
3777 */
Ido Schimmel2881fd62019-06-18 18:12:49 +03003778 info->skip_notify_kernel = 1;
Ido Schimmel02846962019-12-23 15:28:18 +02003779 fn = rcu_dereference_protected(rt->fib6_node,
3780 lockdep_is_held(&table->tb6_lock));
3781 if (rcu_access_pointer(fn->leaf) == rt) {
3782 struct fib6_info *last_sibling, *replace_rt;
3783
3784 last_sibling = list_last_entry(&rt->fib6_siblings,
3785 struct fib6_info,
3786 fib6_siblings);
3787 replace_rt = rcu_dereference_protected(
3788 last_sibling->fib6_next,
3789 lockdep_is_held(&table->tb6_lock));
3790 if (replace_rt)
3791 call_fib6_entry_notifiers_replace(net,
3792 replace_rt);
3793 else
3794 call_fib6_multipath_entry_notifiers(net,
Ido Schimmelcaafb252019-12-23 15:28:20 +02003795 FIB_EVENT_ENTRY_DEL,
Ido Schimmel02846962019-12-23 15:28:18 +02003796 rt, rt->fib6_nsiblings,
3797 NULL);
3798 }
David Ahern0ae81332017-02-02 12:37:08 -08003799 list_for_each_entry_safe(sibling, next_sibling,
David Ahern93c2fb22018-04-18 15:38:59 -07003800 &rt->fib6_siblings,
3801 fib6_siblings) {
David Ahern0ae81332017-02-02 12:37:08 -08003802 err = fib6_del(sibling, info);
3803 if (err)
WANG Conge3330032017-02-27 16:07:43 -08003804 goto out_unlock;
David Ahern0ae81332017-02-02 12:37:08 -08003805 }
3806 }
3807
3808 err = fib6_del(rt, info);
WANG Conge3330032017-02-27 16:07:43 -08003809out_unlock:
Wei Wang66f5d6c2017-10-06 12:06:10 -07003810 spin_unlock_bh(&table->tb6_lock);
WANG Conge3330032017-02-27 16:07:43 -08003811out_put:
David Ahern93531c62018-04-17 17:33:25 -07003812 fib6_info_release(rt);
David Ahern16a16cd2017-02-02 12:37:11 -08003813
3814 if (skb) {
WANG Conge3330032017-02-27 16:07:43 -08003815 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
David Ahern16a16cd2017-02-02 12:37:11 -08003816 info->nlh, gfp_any());
3817 }
David Ahern0ae81332017-02-02 12:37:08 -08003818 return err;
3819}
3820
David Ahern0fa6efc2019-05-22 20:28:00 -07003821static int __ip6_del_cached_rt(struct rt6_info *rt, struct fib6_config *cfg)
David Ahern23fb93a2018-04-17 17:33:23 -07003822{
3823 int rc = -ESRCH;
3824
3825 if (cfg->fc_ifindex && rt->dst.dev->ifindex != cfg->fc_ifindex)
3826 goto out;
3827
3828 if (cfg->fc_flags & RTF_GATEWAY &&
3829 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3830 goto out;
Xin Long761f6022018-11-14 00:48:28 +08003831
3832 rc = rt6_remove_exception_rt(rt);
David Ahern23fb93a2018-04-17 17:33:23 -07003833out:
3834 return rc;
3835}
3836
David Ahern0fa6efc2019-05-22 20:28:00 -07003837static int ip6_del_cached_rt(struct fib6_config *cfg, struct fib6_info *rt,
3838 struct fib6_nh *nh)
3839{
3840 struct fib6_result res = {
3841 .f6i = rt,
3842 .nh = nh,
3843 };
3844 struct rt6_info *rt_cache;
3845
3846 rt_cache = rt6_find_cached_rt(&res, &cfg->fc_dst, &cfg->fc_src);
3847 if (rt_cache)
3848 return __ip6_del_cached_rt(rt_cache, cfg);
3849
3850 return 0;
3851}
3852
David Ahern5b983242019-06-08 14:53:34 -07003853struct fib6_nh_del_cached_rt_arg {
3854 struct fib6_config *cfg;
3855 struct fib6_info *f6i;
3856};
3857
3858static int fib6_nh_del_cached_rt(struct fib6_nh *nh, void *_arg)
3859{
3860 struct fib6_nh_del_cached_rt_arg *arg = _arg;
3861 int rc;
3862
3863 rc = ip6_del_cached_rt(arg->cfg, arg->f6i, nh);
3864 return rc != -ESRCH ? rc : 0;
3865}
3866
3867static int ip6_del_cached_rt_nh(struct fib6_config *cfg, struct fib6_info *f6i)
3868{
3869 struct fib6_nh_del_cached_rt_arg arg = {
3870 .cfg = cfg,
3871 .f6i = f6i
3872 };
3873
3874 return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_del_cached_rt, &arg);
3875}
3876
David Ahern333c4302017-05-21 10:12:04 -06003877static int ip6_route_del(struct fib6_config *cfg,
3878 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879{
Thomas Grafc71099a2006-08-04 23:20:06 -07003880 struct fib6_table *table;
David Ahern8d1c8022018-04-17 17:33:26 -07003881 struct fib6_info *rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 struct fib6_node *fn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 int err = -ESRCH;
3884
Daniel Lezcano55786892008-03-04 13:47:47 -08003885 table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
David Ahernd5d531c2017-05-21 10:12:05 -06003886 if (!table) {
3887 NL_SET_ERR_MSG(extack, "FIB table does not exist");
Thomas Grafc71099a2006-08-04 23:20:06 -07003888 return err;
David Ahernd5d531c2017-05-21 10:12:05 -06003889 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890
Wei Wang66f5d6c2017-10-06 12:06:10 -07003891 rcu_read_lock();
Thomas Grafc71099a2006-08-04 23:20:06 -07003892
3893 fn = fib6_locate(&table->tb6_root,
Thomas Graf86872cb2006-08-22 00:01:08 -07003894 &cfg->fc_dst, cfg->fc_dst_len,
Wei Wang38fbeee2017-10-06 12:06:02 -07003895 &cfg->fc_src, cfg->fc_src_len,
Wei Wang2b760fc2017-10-06 12:06:03 -07003896 !(cfg->fc_flags & RTF_CACHE));
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09003897
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898 if (fn) {
Wei Wang66f5d6c2017-10-06 12:06:10 -07003899 for_each_fib6_node_rt_rcu(fn) {
David Ahernad1601a2019-03-27 20:53:56 -07003900 struct fib6_nh *nh;
3901
Stefano Brivio3401bfb2019-06-21 17:45:25 +02003902 if (rt->nh && cfg->fc_nh_id &&
3903 rt->nh->id != cfg->fc_nh_id)
David Ahern5b983242019-06-08 14:53:34 -07003904 continue;
David Ahern23fb93a2018-04-17 17:33:23 -07003905
David Ahern5b983242019-06-08 14:53:34 -07003906 if (cfg->fc_flags & RTF_CACHE) {
3907 int rc = 0;
3908
3909 if (rt->nh) {
3910 rc = ip6_del_cached_rt_nh(cfg, rt);
3911 } else if (cfg->fc_nh_id) {
3912 continue;
3913 } else {
3914 nh = rt->fib6_nh;
3915 rc = ip6_del_cached_rt(cfg, rt, nh);
3916 }
David Ahern0fa6efc2019-05-22 20:28:00 -07003917 if (rc != -ESRCH) {
3918 rcu_read_unlock();
3919 return rc;
David Ahern23fb93a2018-04-17 17:33:23 -07003920 }
3921 continue;
Wei Wang2b760fc2017-10-06 12:06:03 -07003922 }
David Ahernad1601a2019-03-27 20:53:56 -07003923
David Ahern5b983242019-06-08 14:53:34 -07003924 if (cfg->fc_metric && cfg->fc_metric != rt->fib6_metric)
3925 continue;
3926 if (cfg->fc_protocol &&
3927 cfg->fc_protocol != rt->fib6_protocol)
3928 continue;
3929
3930 if (rt->nh) {
3931 if (!fib6_info_hold_safe(rt))
3932 continue;
3933 rcu_read_unlock();
3934
3935 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3936 }
3937 if (cfg->fc_nh_id)
3938 continue;
3939
3940 nh = rt->fib6_nh;
Thomas Graf86872cb2006-08-22 00:01:08 -07003941 if (cfg->fc_ifindex &&
David Ahernad1601a2019-03-27 20:53:56 -07003942 (!nh->fib_nh_dev ||
3943 nh->fib_nh_dev->ifindex != cfg->fc_ifindex))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 continue;
Thomas Graf86872cb2006-08-22 00:01:08 -07003945 if (cfg->fc_flags & RTF_GATEWAY &&
David Ahernad1601a2019-03-27 20:53:56 -07003946 !ipv6_addr_equal(&cfg->fc_gateway, &nh->fib_nh_gw6))
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 continue;
Wei Wange873e4b2018-07-21 20:56:32 -07003948 if (!fib6_info_hold_safe(rt))
3949 continue;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003950 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951
David Ahern0ae81332017-02-02 12:37:08 -08003952 /* if gateway was specified only delete the one hop */
3953 if (cfg->fc_flags & RTF_GATEWAY)
3954 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3955
3956 return __ip6_del_rt_siblings(rt, cfg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957 }
3958 }
Wei Wang66f5d6c2017-10-06 12:06:10 -07003959 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960
3961 return err;
3962}
3963
David S. Miller6700c272012-07-17 03:29:28 -07003964static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
YOSHIFUJI Hideakia6279452006-08-23 17:18:26 -07003965{
YOSHIFUJI Hideakia6279452006-08-23 17:18:26 -07003966 struct netevent_redirect netevent;
David S. Millere8599ff2012-07-11 23:43:53 -07003967 struct rt6_info *rt, *nrt = NULL;
David Ahern85bd05d2019-04-16 14:36:01 -07003968 struct fib6_result res = {};
David S. Millere8599ff2012-07-11 23:43:53 -07003969 struct ndisc_options ndopts;
3970 struct inet6_dev *in6_dev;
3971 struct neighbour *neigh;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003972 struct rd_msg *msg;
David S. Miller6e157b62012-07-12 00:05:02 -07003973 int optlen, on_link;
3974 u8 *lladdr;
David S. Millere8599ff2012-07-11 23:43:53 -07003975
Simon Horman29a3cad2013-05-28 20:34:26 +00003976 optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003977 optlen -= sizeof(*msg);
David S. Millere8599ff2012-07-11 23:43:53 -07003978
3979 if (optlen < 0) {
David S. Miller6e157b62012-07-12 00:05:02 -07003980 net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
David S. Millere8599ff2012-07-11 23:43:53 -07003981 return;
3982 }
3983
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003984 msg = (struct rd_msg *)icmp6_hdr(skb);
David S. Millere8599ff2012-07-11 23:43:53 -07003985
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003986 if (ipv6_addr_is_multicast(&msg->dest)) {
David S. Miller6e157b62012-07-12 00:05:02 -07003987 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
David S. Millere8599ff2012-07-11 23:43:53 -07003988 return;
3989 }
3990
David S. Miller6e157b62012-07-12 00:05:02 -07003991 on_link = 0;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003992 if (ipv6_addr_equal(&msg->dest, &msg->target)) {
David S. Millere8599ff2012-07-11 23:43:53 -07003993 on_link = 1;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00003994 } else if (ipv6_addr_type(&msg->target) !=
David S. Millere8599ff2012-07-11 23:43:53 -07003995 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
David S. Miller6e157b62012-07-12 00:05:02 -07003996 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
David S. Millere8599ff2012-07-11 23:43:53 -07003997 return;
3998 }
3999
4000 in6_dev = __in6_dev_get(skb->dev);
4001 if (!in6_dev)
4002 return;
4003 if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
4004 return;
4005
4006 /* RFC2461 8.1:
4007 * The IP source address of the Redirect MUST be the same as the current
4008 * first-hop router for the specified ICMP Destination Address.
4009 */
4010
Alexander Aringf997c552016-06-15 21:20:23 +02004011 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
David S. Millere8599ff2012-07-11 23:43:53 -07004012 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
4013 return;
4014 }
David S. Miller6e157b62012-07-12 00:05:02 -07004015
4016 lladdr = NULL;
David S. Millere8599ff2012-07-11 23:43:53 -07004017 if (ndopts.nd_opts_tgt_lladdr) {
4018 lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
4019 skb->dev);
4020 if (!lladdr) {
4021 net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
4022 return;
4023 }
4024 }
4025
David S. Miller6e157b62012-07-12 00:05:02 -07004026 rt = (struct rt6_info *) dst;
Matthias Schifferec13ad12015-11-02 01:24:38 +01004027 if (rt->rt6i_flags & RTF_REJECT) {
David S. Miller6e157b62012-07-12 00:05:02 -07004028 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
4029 return;
4030 }
4031
4032 /* Redirect received -> path was valid.
4033 * Look, redirects are sent only in response to data packets,
4034 * so that this nexthop apparently is reachable. --ANK
4035 */
Julian Anastasov0dec8792017-02-06 23:14:16 +02004036 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
David S. Miller6e157b62012-07-12 00:05:02 -07004037
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00004038 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
David S. Millere8599ff2012-07-11 23:43:53 -07004039 if (!neigh)
4040 return;
4041
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 /*
4043 * We have finally decided to accept it.
4044 */
4045
Alexander Aringf997c552016-06-15 21:20:23 +02004046 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 NEIGH_UPDATE_F_WEAK_OVERRIDE|
4048 NEIGH_UPDATE_F_OVERRIDE|
4049 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
Alexander Aringf997c552016-06-15 21:20:23 +02004050 NEIGH_UPDATE_F_ISROUTER)),
4051 NDISC_REDIRECT, &ndopts);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052
David Ahern4d85cd02018-04-20 15:37:59 -07004053 rcu_read_lock();
David Ahern85bd05d2019-04-16 14:36:01 -07004054 res.f6i = rcu_dereference(rt->from);
David S. Millerff24e492019-05-02 22:14:21 -04004055 if (!res.f6i)
Martin KaFai Lau886b7a52019-04-30 10:45:12 -07004056 goto out;
David Ahern8a14e462018-04-23 11:32:07 -07004057
David Ahern49d5b8e2019-06-08 14:53:30 -07004058 if (res.f6i->nh) {
4059 struct fib6_nh_match_arg arg = {
4060 .dev = dst->dev,
4061 .gw = &rt->rt6i_gateway,
4062 };
4063
4064 nexthop_for_each_fib6_nh(res.f6i->nh,
4065 fib6_nh_find_match, &arg);
4066
4067 /* fib6_info uses a nexthop that does not have fib6_nh
4068 * using the dst->dev. Should be impossible
4069 */
4070 if (!arg.match)
4071 goto out;
4072 res.nh = arg.match;
4073 } else {
4074 res.nh = res.f6i->fib6_nh;
4075 }
4076
David Ahern7d21fec2019-04-16 14:36:11 -07004077 res.fib6_flags = res.f6i->fib6_flags;
4078 res.fib6_type = res.f6i->fib6_type;
David Ahern85bd05d2019-04-16 14:36:01 -07004079 nrt = ip6_rt_cache_alloc(&res, &msg->dest, NULL);
David S. Miller38308472011-12-03 18:02:47 -05004080 if (!nrt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004081 goto out;
4082
4083 nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
4084 if (on_link)
4085 nrt->rt6i_flags &= ~RTF_GATEWAY;
4086
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004087 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004088
Martin KaFai Lau886b7a52019-04-30 10:45:12 -07004089 /* rt6_insert_exception() will take care of duplicated exceptions */
David Ahern5012f0a2019-04-16 14:36:05 -07004090 if (rt6_insert_exception(nrt, &res)) {
Wei Wang2b760fc2017-10-06 12:06:03 -07004091 dst_release_immediate(&nrt->dst);
4092 goto out;
4093 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004094
Changli Gaod8d1f302010-06-10 23:31:35 -07004095 netevent.old = &rt->dst;
4096 netevent.new = &nrt->dst;
YOSHIFUJI Hideaki / 吉藤英明71bcdba2013-01-05 16:34:51 +00004097 netevent.daddr = &msg->dest;
YOSHIFUJI Hideaki / 吉藤英明60592832013-01-14 09:28:27 +00004098 netevent.neigh = neigh;
Tom Tucker8d717402006-07-30 20:43:36 -07004099 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
4100
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101out:
Martin KaFai Lau886b7a52019-04-30 10:45:12 -07004102 rcu_read_unlock();
David S. Millere8599ff2012-07-11 23:43:53 -07004103 neigh_release(neigh);
David S. Miller6e157b62012-07-12 00:05:02 -07004104}
4105
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004106#ifdef CONFIG_IPV6_ROUTE_INFO
David Ahern8d1c8022018-04-17 17:33:26 -07004107static struct fib6_info *rt6_get_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +00004108 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -07004109 const struct in6_addr *gwaddr,
4110 struct net_device *dev)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004111{
David Ahern830218c2016-10-24 10:52:35 -07004112 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
4113 int ifindex = dev->ifindex;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004114 struct fib6_node *fn;
David Ahern8d1c8022018-04-17 17:33:26 -07004115 struct fib6_info *rt = NULL;
Thomas Grafc71099a2006-08-04 23:20:06 -07004116 struct fib6_table *table;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004117
David Ahern830218c2016-10-24 10:52:35 -07004118 table = fib6_get_table(net, tb_id);
David S. Miller38308472011-12-03 18:02:47 -05004119 if (!table)
Thomas Grafc71099a2006-08-04 23:20:06 -07004120 return NULL;
4121
Wei Wang66f5d6c2017-10-06 12:06:10 -07004122 rcu_read_lock();
Wei Wang38fbeee2017-10-06 12:06:02 -07004123 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004124 if (!fn)
4125 goto out;
4126
Wei Wang66f5d6c2017-10-06 12:06:10 -07004127 for_each_fib6_node_rt_rcu(fn) {
David Ahernf88d8ea2019-06-03 20:19:52 -07004128 /* these routes do not use nexthops */
4129 if (rt->nh)
4130 continue;
David Ahern1cf844c2019-05-22 20:27:59 -07004131 if (rt->fib6_nh->fib_nh_dev->ifindex != ifindex)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004132 continue;
David Ahern2b2450c2019-03-27 20:53:52 -07004133 if (!(rt->fib6_flags & RTF_ROUTEINFO) ||
David Ahern1cf844c2019-05-22 20:27:59 -07004134 !rt->fib6_nh->fib_nh_gw_family)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004135 continue;
David Ahern1cf844c2019-05-22 20:27:59 -07004136 if (!ipv6_addr_equal(&rt->fib6_nh->fib_nh_gw6, gwaddr))
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004137 continue;
Wei Wange873e4b2018-07-21 20:56:32 -07004138 if (!fib6_info_hold_safe(rt))
4139 continue;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004140 break;
4141 }
4142out:
Wei Wang66f5d6c2017-10-06 12:06:10 -07004143 rcu_read_unlock();
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004144 return rt;
4145}
4146
David Ahern8d1c8022018-04-17 17:33:26 -07004147static struct fib6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +00004148 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -07004149 const struct in6_addr *gwaddr,
4150 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00004151 unsigned int pref)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004152{
Thomas Graf86872cb2006-08-22 00:01:08 -07004153 struct fib6_config cfg = {
Rami Rosen238fc7e2008-02-09 23:43:11 -08004154 .fc_metric = IP6_RT_PRIO_USER,
David Ahern830218c2016-10-24 10:52:35 -07004155 .fc_ifindex = dev->ifindex,
Thomas Graf86872cb2006-08-22 00:01:08 -07004156 .fc_dst_len = prefixlen,
4157 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
4158 RTF_UP | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08004159 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07004160 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00004161 .fc_nlinfo.portid = 0,
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -08004162 .fc_nlinfo.nlh = NULL,
4163 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07004164 };
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004165
David Ahern830218c2016-10-24 10:52:35 -07004166 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004167 cfg.fc_dst = *prefix;
4168 cfg.fc_gateway = *gwaddr;
Thomas Graf86872cb2006-08-22 00:01:08 -07004169
YOSHIFUJI Hideakie317da92006-03-20 17:06:42 -08004170 /* We should treat it as a default route if prefix length is 0. */
4171 if (!prefixlen)
Thomas Graf86872cb2006-08-22 00:01:08 -07004172 cfg.fc_flags |= RTF_DEFAULT;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004173
David Ahernacb54e32018-04-17 17:33:22 -07004174 ip6_route_add(&cfg, GFP_ATOMIC, NULL);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004175
David Ahern830218c2016-10-24 10:52:35 -07004176 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08004177}
4178#endif
4179
/* Find the RA-learned default router entry for gateway @addr via @dev in
 * the table associated with @dev (l3mdev aware).  Returns the entry with
 * a reference held, or NULL if not found / table missing.
 */
struct fib6_info *rt6_get_dflt_router(struct net *net,
				     const struct in6_addr *addr,
				     struct net_device *dev)
{
	/* VRF/l3mdev slaves use their master's table */
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct fib6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	/* walk the leaves of the root node; the macro assigns 'rt' */
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct fib6_nh *nh;

		/* RA routes do not use nexthops */
		if (rt->nh)
			continue;

		nh = rt->fib6_nh;
		/* match device, gateway, and "default router" flags */
		if (dev == nh->fib_nh_dev &&
		    ((rt->fib6_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&nh->fib_nh_gw6, addr))
			break;
	}
	/* the entry may be on its way to being freed; only return it if
	 * the refcount could still be taken
	 */
	if (rt && !fib6_info_hold_safe(rt))
		rt = NULL;
	rcu_read_unlock();
	return rt;
}
4211
David Ahern8d1c8022018-04-17 17:33:26 -07004212struct fib6_info *rt6_add_dflt_router(struct net *net,
David Ahernafb1d4b52018-04-17 17:33:11 -07004213 const struct in6_addr *gwaddr,
YOSHIFUJI Hideakiebacaaa2006-03-20 17:04:53 -08004214 struct net_device *dev,
4215 unsigned int pref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216{
Thomas Graf86872cb2006-08-22 00:01:08 -07004217 struct fib6_config cfg = {
David Ahernca254492015-10-12 11:47:10 -07004218 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
Rami Rosen238fc7e2008-02-09 23:43:11 -08004219 .fc_metric = IP6_RT_PRIO_USER,
Thomas Graf86872cb2006-08-22 00:01:08 -07004220 .fc_ifindex = dev->ifindex,
4221 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
4222 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08004223 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07004224 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00004225 .fc_nlinfo.portid = 0,
Daniel Lezcano55786892008-03-04 13:47:47 -08004226 .fc_nlinfo.nlh = NULL,
David Ahernafb1d4b52018-04-17 17:33:11 -07004227 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07004228 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07004229
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004230 cfg.fc_gateway = *gwaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231
David Ahernacb54e32018-04-17 17:33:22 -07004232 if (!ip6_route_add(&cfg, GFP_ATOMIC, NULL)) {
David Ahern830218c2016-10-24 10:52:35 -07004233 struct fib6_table *table;
4234
4235 table = fib6_get_table(dev_net(dev), cfg.fc_table);
4236 if (table)
4237 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
4238 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239
David Ahernafb1d4b52018-04-17 17:33:11 -07004240 return rt6_get_dflt_router(net, gwaddr, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241}
4242
/* Delete every RA-learned default router from @table, except on devices
 * whose accept_ra sysctl is 2 (always accept RAs, even when forwarding).
 * Deleting requires dropping the RCU read lock, so the walk restarts from
 * the top after each removal.
 */
static void __rt6_purge_dflt_routers(struct net *net,
				     struct fib6_table *table)
{
	struct fib6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		struct net_device *dev = fib6_info_nh_dev(rt);
		struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;

		/* accept_ra == 2 means RA routes are kept even here;
		 * fib6_info_hold_safe() guards against a concurrent free
		 */
		if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!idev || idev->cnf.accept_ra != 2) &&
		    fib6_info_hold_safe(rt)) {
			rcu_read_unlock();
			ip6_del_rt(net, rt, false);
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}
4266
4267void rt6_purge_dflt_routers(struct net *net)
4268{
4269 struct fib6_table *table;
4270 struct hlist_head *head;
4271 unsigned int h;
4272
4273 rcu_read_lock();
4274
4275 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
4276 head = &net->ipv6.fib_table_hash[h];
4277 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
4278 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
David Ahernafb1d4b52018-04-17 17:33:11 -07004279 __rt6_purge_dflt_routers(net, table);
David Ahern830218c2016-10-24 10:52:35 -07004280 }
4281 }
4282
4283 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284}
4285
Daniel Lezcano55786892008-03-04 13:47:47 -08004286static void rtmsg_to_fib6_config(struct net *net,
4287 struct in6_rtmsg *rtmsg,
Thomas Graf86872cb2006-08-22 00:01:08 -07004288 struct fib6_config *cfg)
4289{
Maciej Żenczykowski8823a3a2018-09-29 23:44:52 -07004290 *cfg = (struct fib6_config){
4291 .fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
4292 : RT6_TABLE_MAIN,
4293 .fc_ifindex = rtmsg->rtmsg_ifindex,
David Ahern67f69512019-03-21 05:21:34 -07004294 .fc_metric = rtmsg->rtmsg_metric ? : IP6_RT_PRIO_USER,
Maciej Żenczykowski8823a3a2018-09-29 23:44:52 -07004295 .fc_expires = rtmsg->rtmsg_info,
4296 .fc_dst_len = rtmsg->rtmsg_dst_len,
4297 .fc_src_len = rtmsg->rtmsg_src_len,
4298 .fc_flags = rtmsg->rtmsg_flags,
4299 .fc_type = rtmsg->rtmsg_type,
Thomas Graf86872cb2006-08-22 00:01:08 -07004300
Maciej Żenczykowski8823a3a2018-09-29 23:44:52 -07004301 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07004302
Maciej Żenczykowski8823a3a2018-09-29 23:44:52 -07004303 .fc_dst = rtmsg->rtmsg_dst,
4304 .fc_src = rtmsg->rtmsg_src,
4305 .fc_gateway = rtmsg->rtmsg_gateway,
4306 };
Thomas Graf86872cb2006-08-22 00:01:08 -07004307}
4308
/* Handle the legacy SIOCADDRT/SIOCDELRT route ioctls.  Requires
 * CAP_NET_ADMIN in the netns' user namespace.  Returns 0 on success or a
 * negative errno (-EPERM, -EFAULT, -EINVAL, or the add/del result).
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		/* copy_from_user() returns the number of bytes NOT copied;
		 * any short copy maps to -EFAULT
		 */
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		/* route table modifications are serialized by rtnl */
		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, GFP_KERNEL, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}
4345
4346/*
4347 * Drop the packet on the floor
4348 */
4349
/* Common handler for the blackhole/prohibit dst entries: account the drop
 * in the proper SNMP counter, send an ICMPv6 destination-unreachable with
 * @code, and free the skb.  Always returns 0.
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net *net = dev_net(dst->dev);
	struct inet6_dev *idev;
	int type;

	/* for l3mdev (VRF) the dst points at loopback; account against the
	 * original ingress device instead
	 */
	if (netif_is_l3_master(skb->dev) &&
	    dst->dev == net->loopback_dev)
		idev = __in6_dev_get_safely(dev_get_by_index_rcu(net, IP6CB(skb)->iif));
	else
		idev = ip6_dst_idev(dst);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		/* unspecified destination counts as an address error,
		 * not a routing failure
		 */
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(net, idev, IPSTATS_MIB_INADDRERRORS);
			break;
		}
		fallthrough;
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(net, idev, ipstats_mib_noroutes);
		break;
	}

	/* Start over by dropping the dst for l3mdev case */
	if (netif_is_l3_master(skb->dev))
		skb_dst_drop(skb);

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}
4384
/* dst input handler for blackhole routes: count as "no route in" */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}
4389
/* dst output handler for blackhole routes: count as "no route out" */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* ip6_pkt_drop() reports against skb->dev; point it at the dst dev */
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
4395
/* dst input handler for prohibit routes: administratively prohibited */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}
4400
/* dst output handler for prohibit routes: administratively prohibited */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	/* ip6_pkt_drop() reports against skb->dev; point it at the dst dev */
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
4406
Linus Torvalds1da177e2005-04-16 15:20:36 -07004407/*
4408 * Allocate a dst for local (unicast / anycast) address.
4409 */
4410
David Ahern360a9882018-04-18 15:39:00 -07004411struct fib6_info *addrconf_f6i_alloc(struct net *net,
4412 struct inet6_dev *idev,
4413 const struct in6_addr *addr,
4414 bool anycast, gfp_t gfp_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004415{
David Ahernc7a1ce32019-03-21 05:21:35 -07004416 struct fib6_config cfg = {
4417 .fc_table = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL,
4418 .fc_ifindex = idev->dev->ifindex,
Maciej Żenczykowskid55a2e32019-09-02 09:23:36 -07004419 .fc_flags = RTF_UP | RTF_NONEXTHOP,
David Ahernc7a1ce32019-03-21 05:21:35 -07004420 .fc_dst = *addr,
4421 .fc_dst_len = 128,
4422 .fc_protocol = RTPROT_KERNEL,
4423 .fc_nlinfo.nl_net = net,
4424 .fc_ignore_dev_down = true,
4425 };
Maciej Żenczykowskid55a2e32019-09-02 09:23:36 -07004426 struct fib6_info *f6i;
David Ahern5f02ce242016-09-10 12:09:54 -07004427
David Aherne8478e82018-04-17 17:33:13 -07004428 if (anycast) {
David Ahernc7a1ce32019-03-21 05:21:35 -07004429 cfg.fc_type = RTN_ANYCAST;
4430 cfg.fc_flags |= RTF_ANYCAST;
David Aherne8478e82018-04-17 17:33:13 -07004431 } else {
David Ahernc7a1ce32019-03-21 05:21:35 -07004432 cfg.fc_type = RTN_LOCAL;
4433 cfg.fc_flags |= RTF_LOCAL;
David Aherne8478e82018-04-17 17:33:13 -07004434 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004435
Maciej Żenczykowskid55a2e32019-09-02 09:23:36 -07004436 f6i = ip6_route_info_create(&cfg, gfp_flags, NULL);
Maciej Żenczykowski8652f172019-09-05 20:56:37 -07004437 if (!IS_ERR(f6i))
Maciej Żenczykowskid55a2e32019-09-02 09:23:36 -07004438 f6i->dst_nocount = true;
4439 return f6i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004440}
4441
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* device the address lived on (NULL = any) */
	struct net *net;	/* netns being walked */
	struct in6_addr *addr;	/* the removed preferred source address */
};
4448
/* fib6_clean_all() callback: clear the preferred-source setting on every
 * route that used the removed address @addr (on @dev, or anywhere if @dev
 * is NULL).  Always returns 0 -- the route itself is kept.
 */
static int fib6_remove_prefsrc(struct fib6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	/* routes using a nexthop object are skipped (!rt->nh required) */
	if (!rt->nh &&
	    ((void *)rt->fib6_nh->fib_nh_dev == dev || !dev) &&
	    rt != net->ipv6.fib6_null_entry &&
	    ipv6_addr_equal(addr, &rt->fib6_prefsrc.addr)) {
		/* serialize against readers of fib6_prefsrc */
		spin_lock_bh(&rt6_exception_lock);
		/* remove prefsrc entry */
		rt->fib6_prefsrc.plen = 0;
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}
4466
4467void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
4468{
4469 struct net *net = dev_net(ifp->idev->dev);
4470 struct arg_dev_net_ip adni = {
4471 .dev = ifp->idev->dev,
4472 .net = net,
4473 .addr = &ifp->addr,
4474 };
Li RongQing0c3584d2013-12-27 16:32:38 +08004475 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
Daniel Walterc3968a82011-04-13 21:10:57 +00004476}
4477
/* flag combination carried by an RA-learned default router entry */
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT)
Duan Jiongbe7a0102014-05-15 15:56:14 +08004479
/* Remove routers and update dst entries when gateway turn into host. */
/* fib6_clean_all() callback: returns -1 (delete) for RA router entries
 * whose gateway equals @arg, 0 otherwise; also flushes matching cached
 * exception routes hanging off the nexthop.
 */
static int fib6_clean_tohost(struct fib6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;
	struct fib6_nh *nh;

	/* RA routes do not use nexthops */
	if (rt->nh)
		return 0;

	nh = rt->fib6_nh;
	if (((rt->fib6_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    nh->fib_nh_gw_family && ipv6_addr_equal(gateway, &nh->fib_nh_gw6))
		return -1;

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	fib6_nh_exceptions_clean_tohost(nh, gateway);

	return 0;
}
4503
/* Purge router entries via @gateway across all tables in @net -- used
 * when the gateway address becomes a local ("host") address.
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
4508
/* Argument for the device-event fib walkers (fib6_ifup / fib6_ifdown).
 * The anonymous union mirrors the two callers: rt6_sync_up() passes
 * nexthop flags to clear, rt6_sync_down_dev() passes a netdev event.
 */
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned char nh_flags;
		unsigned long event;
	};
};
4516
/* Return the first entry of the ECMP group @rt belongs to: the first
 * leaf in @rt's fib6_node with the same metric that qualifies for ECMP,
 * or NULL.  Caller must hold the table write lock -- the
 * rcu_dereference_protected() conditions assert this via lockdep.
 */
static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
{
	struct fib6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference_protected(rt->fib6_node,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	iter = rcu_dereference_protected(fn->leaf,
			lockdep_is_held(&rt->fib6_table->tb6_lock));
	while (iter) {
		if (iter->fib6_metric == rt->fib6_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->fib6_next,
				lockdep_is_held(&rt->fib6_table->tb6_lock));
	}

	return NULL;
}
4536
David Ahernf88d8ea2019-06-03 20:19:52 -07004537/* only called for fib entries with builtin fib6_nh */
David Ahern8d1c8022018-04-17 17:33:26 -07004538static bool rt6_is_dead(const struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004539{
David Ahern1cf844c2019-05-22 20:27:59 -07004540 if (rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD ||
4541 (rt->fib6_nh->fib_nh_flags & RTNH_F_LINKDOWN &&
4542 ip6_ignore_linkdown(rt->fib6_nh->fib_nh_dev)))
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004543 return true;
4544
4545 return false;
4546}
4547
David Ahern8d1c8022018-04-17 17:33:26 -07004548static int rt6_multipath_total_weight(const struct fib6_info *rt)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004549{
David Ahern8d1c8022018-04-17 17:33:26 -07004550 struct fib6_info *iter;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004551 int total = 0;
4552
4553 if (!rt6_is_dead(rt))
David Ahern1cf844c2019-05-22 20:27:59 -07004554 total += rt->fib6_nh->fib_nh_weight;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004555
David Ahern93c2fb22018-04-18 15:38:59 -07004556 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings) {
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004557 if (!rt6_is_dead(iter))
David Ahern1cf844c2019-05-22 20:27:59 -07004558 total += iter->fib6_nh->fib_nh_weight;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004559 }
4560
4561 return total;
4562}
4563
/* Accumulate @rt's nexthop weight into *@weight and set this leg's hash
 * upper bound: dead legs get -1 (never selected); live legs get their
 * cumulative share of the 31-bit hash space, rounded to nearest.
 */
static void rt6_upper_bound_set(struct fib6_info *rt, int *weight, int total)
{
	int upper_bound = -1;

	if (!rt6_is_dead(rt)) {
		*weight += rt->fib6_nh->fib_nh_weight;
		upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
						    total) - 1;
	}
	/* atomic_set: readers sample the bound locklessly on lookup */
	atomic_set(&rt->fib6_nh->fib_nh_upper_bound, upper_bound);
}
4575
David Ahern8d1c8022018-04-17 17:33:26 -07004576static void rt6_multipath_upper_bound_set(struct fib6_info *rt, int total)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004577{
David Ahern8d1c8022018-04-17 17:33:26 -07004578 struct fib6_info *iter;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004579 int weight = 0;
4580
4581 rt6_upper_bound_set(rt, &weight, total);
4582
David Ahern93c2fb22018-04-18 15:38:59 -07004583 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
Ido Schimmeld7dedee2018-01-09 16:40:25 +02004584 rt6_upper_bound_set(iter, &weight, total);
4585}
4586
/* Redistribute the multipath hash space over the live legs of @rt's
 * ECMP group after a leg changed state.  No-op for non-multipath routes
 * or when the whole group is being flushed.
 */
void rt6_multipath_rebalance(struct fib6_info *rt)
{
	struct fib6_info *first;
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->fib6_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}
4610
/* fib6_clean_all() callback for rt6_sync_up(): clear arg->nh_flags on
 * entries whose nexthop device came (back) up.  Always returns 0 --
 * never deletes an entry.
 */
static int fib6_ifup(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	struct net *net = dev_net(arg->dev);

	/* skip the null entry; routes using nexthop objects (rt->nh) are
	 * maintained by the nexthop code, not per-fib6_info
	 */
	if (rt != net->ipv6.fib6_null_entry && !rt->nh &&
	    rt->fib6_nh->fib_nh_dev == arg->dev) {
		rt->fib6_nh->fib_nh_flags &= ~arg->nh_flags;
		/* bump sernum so cached dsts get revalidated */
		fib6_update_sernum_upto_root(net, rt);
		rt6_multipath_rebalance(rt);
	}

	return 0;
}
4625
/* Clear @nh_flags (e.g. RTNH_F_DEAD) on all fib entries whose nexthop
 * device is @dev -- called when the device comes up.
 */
void rt6_sync_up(struct net_device *dev, unsigned char nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			/* positional init of the anonymous union */
			.nh_flags = nh_flags,
		},
	};

	/* when reviving a DEAD nexthop and carrier is up, clear the
	 * LINKDOWN flag as well
	 */
	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}
4640
David Ahernf88d8ea2019-06-03 20:19:52 -07004641/* only called for fib entries with inline fib6_nh */
David Ahern8d1c8022018-04-17 17:33:26 -07004642static bool rt6_multipath_uses_dev(const struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02004643 const struct net_device *dev)
4644{
David Ahern8d1c8022018-04-17 17:33:26 -07004645 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004646
David Ahern1cf844c2019-05-22 20:27:59 -07004647 if (rt->fib6_nh->fib_nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004648 return true;
David Ahern93c2fb22018-04-18 15:38:59 -07004649 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahern1cf844c2019-05-22 20:27:59 -07004650 if (iter->fib6_nh->fib_nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004651 return true;
4652
4653 return false;
4654}
4655
David Ahern8d1c8022018-04-17 17:33:26 -07004656static void rt6_multipath_flush(struct fib6_info *rt)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004657{
David Ahern8d1c8022018-04-17 17:33:26 -07004658 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004659
4660 rt->should_flush = 1;
David Ahern93c2fb22018-04-18 15:38:59 -07004661 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004662 iter->should_flush = 1;
4663}
4664
/* Count how many legs of @rt's ECMP group are unusable: either already
 * flagged RTNH_F_DEAD or egressing through @down_dev.
 */
static unsigned int rt6_multipath_dead_count(const struct fib6_info *rt,
					     const struct net_device *down_dev)
{
	struct fib6_info *iter;
	unsigned int dead = 0;

	if (rt->fib6_nh->fib_nh_dev == down_dev ||
	    rt->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
		dead++;
	list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
		if (iter->fib6_nh->fib_nh_dev == down_dev ||
		    iter->fib6_nh->fib_nh_flags & RTNH_F_DEAD)
			dead++;

	return dead;
}
4681
David Ahern8d1c8022018-04-17 17:33:26 -07004682static void rt6_multipath_nh_flags_set(struct fib6_info *rt,
Ido Schimmel1de178e2018-01-07 12:45:15 +02004683 const struct net_device *dev,
David Ahernecc56632019-04-23 08:48:09 -07004684 unsigned char nh_flags)
Ido Schimmel1de178e2018-01-07 12:45:15 +02004685{
David Ahern8d1c8022018-04-17 17:33:26 -07004686 struct fib6_info *iter;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004687
David Ahern1cf844c2019-05-22 20:27:59 -07004688 if (rt->fib6_nh->fib_nh_dev == dev)
4689 rt->fib6_nh->fib_nh_flags |= nh_flags;
David Ahern93c2fb22018-04-18 15:38:59 -07004690 list_for_each_entry(iter, &rt->fib6_siblings, fib6_siblings)
David Ahern1cf844c2019-05-22 20:27:59 -07004691 if (iter->fib6_nh->fib_nh_dev == dev)
4692 iter->fib6_nh->fib_nh_flags |= nh_flags;
Ido Schimmel1de178e2018-01-07 12:45:15 +02004693}
4694
/* called with write lock held for table with rt */
/* fib6_clean_all() callback for device down/unregister events.
 * Return protocol: 0 keep the entry, -1 delete it, -2 special multipath
 * handling -- NOTE(review): -2 appears to tell the walker to skip the
 * remaining siblings of this group; confirm against fib6_clean_node().
 */
static int fib6_ifdown(struct fib6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
	struct net *net = dev_net(dev);

	/* null entry is never removed; nexthop-object routes are handled
	 * by the nexthop code
	 */
	if (rt == net->ipv6.fib6_null_entry || rt->nh)
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
		/* device is going away entirely: delete its routes */
		return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
	case NETDEV_DOWN:
		if (rt->should_flush)
			return -1;
		if (!rt->fib6_nsiblings)
			return rt->fib6_nh->fib_nh_dev == dev ? -1 : 0;
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
			/* all legs dead: flush the whole ECMP group */
			if (rt->fib6_nsiblings + 1 == count) {
				rt6_multipath_flush(rt);
				return -1;
			}
			/* some legs remain: mark affected legs dead and
			 * rebalance the hash space over the survivors
			 */
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
			fib6_update_sernum(net, rt);
			rt6_multipath_rebalance(rt);
		}
		return -2;
	case NETDEV_CHANGE:
		/* carrier change: only mark LINKDOWN on plain unicast
		 * routes egressing this device
		 */
		if (rt->fib6_nh->fib_nh_dev != dev ||
		    rt->fib6_flags & (RTF_LOCAL | RTF_ANYCAST))
			break;
		rt->fib6_nh->fib_nh_flags |= RTNH_F_LINKDOWN;
		rt6_multipath_rebalance(rt);
		break;
	}

	return 0;
}
4738
/* Propagate a device down/unregister @event to the fib: walk all tables
 * and let fib6_ifdown() delete or flag affected entries.
 */
void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			/* positional init of the anonymous union */
			.event = event,
		},
	};
	struct net *net = dev_net(dev);

	/* optionally suppress per-route delete notifications on device
	 * down (sysctl skip_notify_on_dev_down)
	 */
	if (net->ipv6.sysctl.skip_notify_on_dev_down)
		fib6_clean_all_skip_notify(net, fib6_ifdown, &arg);
	else
		fib6_clean_all(net, fib6_ifdown, &arg);
}
4754
/* Full IPv6 teardown for @dev: sync the fib, flush uncached (percpu /
 * exception) routes referencing the device, and drop its ndisc neighbours.
 */
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}
4761
/* Argument for the MTU-change fib walk (rt6_mtu_change_route /
 * fib6_nh_mtu_change).
 */
struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* the new MTU */
	struct fib6_info *f6i;	/* fib entry currently being visited */
};
4767
/* Per-nexthop worker for rt6_mtu_change_route(): if this nexthop egresses
 * through the device whose MTU changed, refresh the route's MTU metric and
 * its cached PMTU exceptions. Always returns 0 so iteration over the
 * remaining nexthops continues.
 */
static int fib6_nh_mtu_change(struct fib6_nh *nh, void *_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *)_arg;
	struct fib6_info *f6i = arg->f6i;

	/* For administrative MTU increase, there is no way to discover
	 * IPv6 PMTU increase, so PMTU increase should be updated here.
	 * Since RFC 1981 doesn't include administrative MTU increase
	 * update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	if (nh->fib_nh_dev == arg->dev) {
		/* caller (rt6_mtu_change_route) already verified that
		 * __in6_dev_get(arg->dev) is non-NULL for this device
		 */
		struct inet6_dev *idev = __in6_dev_get(arg->dev);
		u32 mtu = f6i->fib6_pmtu;

		/* Rewrite the metric when the recorded MTU is at least the
		 * new one (clamp down), or when it was tracking the device
		 * MTU (mtu == idev->cnf.mtu6) and may now be raised.
		 */
		if (mtu >= arg->mtu ||
		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
			fib6_metric_set(f6i, RTAX_MTU, arg->mtu);

		/* exception cache is protected by rt6_exception_lock */
		spin_lock_bh(&rt6_exception_lock);
		rt6_exceptions_update_pmtu(idev, nh, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}

	return 0;
}
4793
/* fib6_clean_all() callback for rt6_mtu_change(): propagate a device MTU
 * change to one fib entry, visiting each of its nexthops. Returns 0 in
 * all cases so the table walk continues.
 */
static int rt6_mtu_change_route(struct fib6_info *f6i, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* a locked RTAX_MTU metric means userspace pinned it; leave it */
	if (fib6_metric_locked(f6i, RTAX_MTU))
		return 0;

	arg->f6i = f6i;
	if (f6i->nh) {
		/* fib6_nh_mtu_change only returns 0, so this is safe */
		return nexthop_for_each_fib6_nh(f6i->nh, fib6_nh_mtu_change,
						arg);
	}

	return fib6_nh_mtu_change(f6i->fib6_nh, arg);
}
4821
Eric Dumazet95c96172012-04-15 05:58:06 +00004822void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004823{
Thomas Grafc71099a2006-08-04 23:20:06 -07004824 struct rt6_mtu_change_arg arg = {
4825 .dev = dev,
4826 .mtu = mtu,
4827 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07004828
Li RongQing0c3584d2013-12-27 16:32:38 +08004829 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004830}
4831
/* Netlink attribute validation policy for IPv6 RTM_*ROUTE requests;
 * consumed by nlmsg_parse_deprecated() in rtm_to_fib6_config().
 * Attributes above RTA_DPORT are rejected under strict validation.
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_UNSPEC]		= { .strict_start_type = RTA_DPORT + 1 },
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_PREFSRC]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
	[RTA_TABLE]		= { .type = NLA_U32 },
	[RTA_IP_PROTO]		= { .type = NLA_U8 },
	[RTA_SPORT]		= { .type = NLA_U16 },
	[RTA_DPORT]		= { .type = NLA_U16 },
	[RTA_NH_ID]		= { .type = NLA_U32 },
};
4853
/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * fib6_config. Returns 0 on success or a negative errno. On success,
 * @cfg holds pointers into @nlh's attribute payload (fc_mx, fc_mp,
 * fc_encap), so it must not outlive the message.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
				     rtm_ipv6_policy, extack);
	if (err < 0)
		goto errout;

	/* default error for the validation failures below */
	err = -EINVAL;
	rtm = nlmsg_data(nlh);

	/* zero-initialize cfg and seed it from the rtmsg header */
	*cfg = (struct fib6_config){
		.fc_table = rtm->rtm_table,
		.fc_dst_len = rtm->rtm_dst_len,
		.fc_src_len = rtm->rtm_src_len,
		.fc_flags = RTF_UP,
		.fc_protocol = rtm->rtm_protocol,
		.fc_type = rtm->rtm_type,

		.fc_nlinfo.portid = NETLINK_CB(skb).portid,
		.fc_nlinfo.nlh = nlh,
		.fc_nlinfo.nl_net = sock_net(skb->sk),
	};

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);

	/* a nexthop object reference excludes inline nexthop attributes */
	if (tb[RTA_NH_ID]) {
		if (tb[RTA_GATEWAY] || tb[RTA_OIF] ||
		    tb[RTA_MULTIPATH] || tb[RTA_ENCAP]) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop specification and nexthop id are mutually exclusive");
			goto errout;
		}
		cfg->fc_nh_id = nla_get_u32(tb[RTA_NH_ID]);
	}

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}
	if (tb[RTA_VIA]) {
		NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
		goto errout;
	}

	if (tb[RTA_DST]) {
		/* attribute must carry at least rtm_dst_len prefix bits */
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		/* points into the message; not copied */
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* unknown preference values fall back to MEDIUM */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		/* only finite timeouts translate into an expiring route */
		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}
4994
/* One parsed nexthop queued while building a multipath route;
 * see ip6_route_multipath_add() / ip6_route_info_append().
 */
struct rt6_nh {
	struct fib6_info *fib6_info;	/* route created for this nexthop */
	struct fib6_config r_cfg;	/* per-nexthop copy of the request */
	struct list_head next;		/* link in the local rt6_nh_list */
};
5000
David Ahernd4ead6b2018-04-17 17:33:16 -07005001static int ip6_route_info_append(struct net *net,
5002 struct list_head *rt6_nh_list,
David Ahern8d1c8022018-04-17 17:33:26 -07005003 struct fib6_info *rt,
5004 struct fib6_config *r_cfg)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07005005{
5006 struct rt6_nh *nh;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07005007 int err = -EEXIST;
5008
5009 list_for_each_entry(nh, rt6_nh_list, next) {
David Ahern8d1c8022018-04-17 17:33:26 -07005010 /* check if fib6_info already exists */
5011 if (rt6_duplicate_nexthop(nh->fib6_info, rt))
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07005012 return err;
5013 }
5014
5015 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
5016 if (!nh)
5017 return -ENOMEM;
David Ahern8d1c8022018-04-17 17:33:26 -07005018 nh->fib6_info = rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07005019 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
5020 list_add_tail(&nh->next, rt6_nh_list);
5021
5022 return 0;
5023}
5024
David Ahern8d1c8022018-04-17 17:33:26 -07005025static void ip6_route_mpath_notify(struct fib6_info *rt,
5026 struct fib6_info *rt_last,
David Ahern3b1137f2017-02-02 12:37:10 -08005027 struct nl_info *info,
5028 __u16 nlflags)
5029{
5030 /* if this is an APPEND route, then rt points to the first route
5031 * inserted and rt_last points to last route inserted. Userspace
5032 * wants a consistent dump of the route which starts at the first
5033 * nexthop. Since sibling routes are always added at the end of
5034 * the list, find the first sibling of the last route appended
5035 */
David Ahern93c2fb22018-04-18 15:38:59 -07005036 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->fib6_nsiblings) {
5037 rt = list_first_entry(&rt_last->fib6_siblings,
David Ahern8d1c8022018-04-17 17:33:26 -07005038 struct fib6_info,
David Ahern93c2fb22018-04-18 15:38:59 -07005039 fib6_siblings);
David Ahern3b1137f2017-02-02 12:37:10 -08005040 }
5041
5042 if (rt)
5043 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
5044}
5045
/* Decide whether an in-kernel fib notifier event should be generated for
 * a freshly inserted multipath route: only when @rt is the leaf of its
 * fib6 node, or an ECMP-capable sibling with the same metric as the leaf.
 */
static bool ip6_route_mpath_should_notify(const struct fib6_info *rt)
{
	bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
	bool should_notify = false;
	struct fib6_info *leaf;
	struct fib6_node *fn;

	/* fib6_node and its leaf pointer are RCU-protected */
	rcu_read_lock();
	fn = rcu_dereference(rt->fib6_node);
	if (!fn)
		goto out;

	leaf = rcu_dereference(fn->leaf);
	if (!leaf)
		goto out;

	if (rt == leaf ||
	    (rt_can_ecmp && rt->fib6_metric == leaf->fib6_metric &&
	     rt6_qualify_for_ecmp(leaf)))
		should_notify = true;
out:
	rcu_read_unlock();

	return should_notify;
}
5071
/* Install a multipath (ECMP) route from an RTM_NEWROUTE request whose
 * RTA_MULTIPATH attribute carries several nexthops.
 *
 * One fib6_info is created per nexthop up front, then they are inserted
 * one by one. If any insertion fails, the routes inserted so far by this
 * request are deleted again (add_errout), so the operation does not leave
 * a partial route behind. Userspace gets a single notification covering
 * the route; in-kernel notifiers get one event for the whole sibling set.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct fib6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * fib6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		/* each nexthop inherits the base request, then overrides
		 * ifindex/gateway/encap from its own rtnexthop entry
		 */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
		rt = ip6_route_info_create(&r_cfg, GFP_KERNEL, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}
		if (!rt6_qualify_for_ecmp(rt)) {
			err = -EINVAL;
			NL_SET_ERR_MSG(extack,
				       "Device only routes can not be added for IPv6 using the multipath API.");
			fib6_info_release(rt);
			goto cleanup;
		}

		rt->fib6_nh->fib_nh_weight = rtnh->rtnh_hops + 1;

		err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
					    rt, &r_cfg);
		if (err) {
			fib6_info_release(rt);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	if (list_empty(&rt6_nh_list)) {
		NL_SET_ERR_MSG(extack,
			       "Invalid nexthop configuration - no valid nexthops");
		return -EINVAL;
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	/* For add and replace, send one notification with all nexthops. For
	 * append, send one notification with all appended nexthops.
	 */
	info->skip_notify_kernel = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		err = __ip6_ins_rt(nh->fib6_info, info, extack);
		fib6_info_release(nh->fib6_info);

		if (!err) {
			/* save reference to last route successfully inserted */
			rt_last = nh->fib6_info;

			/* save reference to first route for notification */
			if (!rt_notif)
				rt_notif = nh->fib6_info;
		}

		/* nh->fib6_info is used or freed at this point, reset to NULL*/
		nh->fib6_info = NULL;
		if (err) {
			if (replace && nhn)
				NL_SET_ERR_MSG_MOD(extack,
						   "multipath route replace failed (check consistency of installed routes)");
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
		nhn++;
	}

	/* An in-kernel notification should only be sent in case the new
	 * multipath route is added as the first route in the node, or if
	 * it was appended to it. We pass 'rt_notif' since it is the first
	 * sibling and might allow us to skip some checks in the replace case.
	 */
	if (ip6_route_mpath_should_notify(rt_notif)) {
		enum fib_event_type fib_event;

		if (rt_notif->fib6_nsiblings != nhn - 1)
			fib_event = FIB_EVENT_ENTRY_APPEND;
		else
			fib_event = FIB_EVENT_ENTRY_REPLACE;

		err = call_fib6_multipath_entry_notifiers(info->nl_net,
							  fib_event, rt_notif,
							  nhn - 1, extack);
		if (err) {
			/* Delete all the siblings that were just added */
			err_nh = NULL;
			goto add_errout;
		}
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	/* release any routes that were created but never inserted */
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->fib6_info)
			fib6_info_release(nh->fib6_info);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}
5254
/* Delete each nexthop of a multipath RTM_DELROUTE request as an
 * individual route. Failures do not stop the walk; the last error seen
 * is returned (0 if every deletion succeeded).
 */
static int ip6_route_multipath_del(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		/* per-nexthop config: base request plus this entry's
		 * ifindex and gateway
		 */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg, extack);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}
5292
David Ahernc21ef3e2017-04-16 09:48:24 -07005293static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5294 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005295{
Thomas Graf86872cb2006-08-22 00:01:08 -07005296 struct fib6_config cfg;
5297 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005298
David Ahern333c4302017-05-21 10:12:04 -06005299 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07005300 if (err < 0)
5301 return err;
5302
David Ahern5b983242019-06-08 14:53:34 -07005303 if (cfg.fc_nh_id &&
5304 !nexthop_find_by_id(sock_net(skb->sk), cfg.fc_nh_id)) {
5305 NL_SET_ERR_MSG(extack, "Nexthop id does not exist");
5306 return -EINVAL;
5307 }
5308
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00005309 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06005310 return ip6_route_multipath_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08005311 else {
5312 cfg.fc_delete_all_nh = 1;
David Ahern333c4302017-05-21 10:12:04 -06005313 return ip6_route_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08005314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005315}
5316
David Ahernc21ef3e2017-04-16 09:48:24 -07005317static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
5318 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005319{
Thomas Graf86872cb2006-08-22 00:01:08 -07005320 struct fib6_config cfg;
5321 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005322
David Ahern333c4302017-05-21 10:12:04 -06005323 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07005324 if (err < 0)
5325 return err;
5326
David Ahern67f69512019-03-21 05:21:34 -07005327 if (cfg.fc_metric == 0)
5328 cfg.fc_metric = IP6_RT_PRIO_USER;
5329
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00005330 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06005331 return ip6_route_multipath_add(&cfg, extack);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00005332 else
David Ahernacb54e32018-04-17 17:33:22 -07005333 return ip6_route_add(&cfg, GFP_KERNEL, extack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005334}
5335
David Aherna1b7a1f2019-06-08 14:53:26 -07005336/* add the overhead of this fib6_nh to nexthop_len */
5337static int rt6_nh_nlmsg_size(struct fib6_nh *nh, void *arg)
Thomas Graf339bf982006-11-10 14:10:15 -08005338{
David Aherna1b7a1f2019-06-08 14:53:26 -07005339 int *nexthop_len = arg;
David Ahernbeb1afac52017-02-02 12:37:09 -08005340
David Aherna1b7a1f2019-06-08 14:53:26 -07005341 *nexthop_len += nla_total_size(0) /* RTA_MULTIPATH */
5342 + NLA_ALIGN(sizeof(struct rtnexthop))
5343 + nla_total_size(16); /* RTA_GATEWAY */
David Ahernf88d8ea2019-06-03 20:19:52 -07005344
David Aherna1b7a1f2019-06-08 14:53:26 -07005345 if (nh->fib_nh_lws) {
5346 /* RTA_ENCAP_TYPE */
5347 *nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
5348 /* RTA_ENCAP */
5349 *nexthop_len += nla_total_size(2);
5350 }
David Ahernbeb1afac52017-02-02 12:37:09 -08005351
David Aherna1b7a1f2019-06-08 14:53:26 -07005352 return 0;
5353}
5354
/* Worst-case netlink message size needed to dump @f6i; used to size the
 * skb for route notifications. Accounts for the fixed rtmsg attributes
 * plus per-nexthop overhead (nexthop object, multipath siblings, or a
 * single inline nexthop).
 */
static size_t rt6_nlmsg_size(struct fib6_info *f6i)
{
	int nexthop_len;

	if (f6i->nh) {
		/* route uses a nexthop object: RTA_NH_ID plus whatever
		 * each fib6_nh in the object contributes
		 */
		nexthop_len = nla_total_size(4); /* RTA_NH_ID */
		nexthop_for_each_fib6_nh(f6i->nh, rt6_nh_nlmsg_size,
					 &nexthop_len);
	} else {
		struct fib6_nh *nh = f6i->fib6_nh;

		nexthop_len = 0;
		if (f6i->fib6_nsiblings) {
			/* one rtnexthop entry per sibling */
			nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
				    + NLA_ALIGN(sizeof(struct rtnexthop))
				    + nla_total_size(16) /* RTA_GATEWAY */
				    + lwtunnel_get_encap_size(nh->fib_nh_lws);

			nexthop_len *= f6i->fib6_nsiblings;
		}
		nexthop_len += lwtunnel_get_encap_size(nh->fib_nh_lws);
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + nexthop_len;
}
5393
David Ahernf88d8ea2019-06-03 20:19:52 -07005394static int rt6_fill_node_nexthop(struct sk_buff *skb, struct nexthop *nh,
5395 unsigned char *flags)
5396{
5397 if (nexthop_is_multipath(nh)) {
5398 struct nlattr *mp;
5399
David Ahern4255ff02019-09-03 15:22:12 -07005400 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
David Ahernf88d8ea2019-06-03 20:19:52 -07005401 if (!mp)
5402 goto nla_put_failure;
5403
Donald Sharp7bdf4de2019-09-04 10:11:58 -04005404 if (nexthop_mpath_fill_node(skb, nh, AF_INET6))
David Ahernf88d8ea2019-06-03 20:19:52 -07005405 goto nla_put_failure;
5406
5407 nla_nest_end(skb, mp);
5408 } else {
5409 struct fib6_nh *fib6_nh;
5410
5411 fib6_nh = nexthop_fib6_nh(nh);
Donald Sharp7bdf4de2019-09-04 10:11:58 -04005412 if (fib_nexthop_info(skb, &fib6_nh->nh_common, AF_INET6,
David Ahernf88d8ea2019-06-03 20:19:52 -07005413 flags, false) < 0)
5414 goto nla_put_failure;
5415 }
5416
5417 return 0;
5418
5419nla_put_failure:
5420 return -EMSGSIZE;
5421}
5422
David Ahernd4ead6b2018-04-17 17:33:16 -07005423static int rt6_fill_node(struct net *net, struct sk_buff *skb,
David Ahern8d1c8022018-04-17 17:33:26 -07005424 struct fib6_info *rt, struct dst_entry *dst,
David Ahernd4ead6b2018-04-17 17:33:16 -07005425 struct in6_addr *dest, struct in6_addr *src,
Eric W. Biederman15e47302012-09-07 20:12:54 +00005426 int iif, int type, u32 portid, u32 seq,
David Ahernf8cfe2c2017-01-17 15:51:08 -08005427 unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005428{
Xin Long22d0bd82018-09-11 14:33:58 +08005429 struct rt6_info *rt6 = (struct rt6_info *)dst;
5430 struct rt6key *rt6_dst, *rt6_src;
5431 u32 *pmetrics, table, rt6_flags;
David Ahernf88d8ea2019-06-03 20:19:52 -07005432 unsigned char nh_flags = 0;
Thomas Graf2d7202b2006-08-22 00:01:27 -07005433 struct nlmsghdr *nlh;
Xin Long22d0bd82018-09-11 14:33:58 +08005434 struct rtmsg *rtm;
David Ahernd4ead6b2018-04-17 17:33:16 -07005435 long expires = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005436
Eric W. Biederman15e47302012-09-07 20:12:54 +00005437 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
David S. Miller38308472011-12-03 18:02:47 -05005438 if (!nlh)
Patrick McHardy26932562007-01-31 23:16:40 -08005439 return -EMSGSIZE;
Thomas Graf2d7202b2006-08-22 00:01:27 -07005440
Xin Long22d0bd82018-09-11 14:33:58 +08005441 if (rt6) {
5442 rt6_dst = &rt6->rt6i_dst;
5443 rt6_src = &rt6->rt6i_src;
5444 rt6_flags = rt6->rt6i_flags;
5445 } else {
5446 rt6_dst = &rt->fib6_dst;
5447 rt6_src = &rt->fib6_src;
5448 rt6_flags = rt->fib6_flags;
5449 }
5450
Thomas Graf2d7202b2006-08-22 00:01:27 -07005451 rtm = nlmsg_data(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005452 rtm->rtm_family = AF_INET6;
Xin Long22d0bd82018-09-11 14:33:58 +08005453 rtm->rtm_dst_len = rt6_dst->plen;
5454 rtm->rtm_src_len = rt6_src->plen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005455 rtm->rtm_tos = 0;
David Ahern93c2fb22018-04-18 15:38:59 -07005456 if (rt->fib6_table)
5457 table = rt->fib6_table->tb6_id;
Thomas Grafc71099a2006-08-04 23:20:06 -07005458 else
Patrick McHardy9e762a42006-08-10 23:09:48 -07005459 table = RT6_TABLE_UNSPEC;
Kalash Nainwal97f00822019-02-20 16:23:04 -08005460 rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
David S. Millerc78679e2012-04-01 20:27:33 -04005461 if (nla_put_u32(skb, RTA_TABLE, table))
5462 goto nla_put_failure;
David Aherne8478e82018-04-17 17:33:13 -07005463
5464 rtm->rtm_type = rt->fib6_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005465 rtm->rtm_flags = 0;
5466 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
David Ahern93c2fb22018-04-18 15:38:59 -07005467 rtm->rtm_protocol = rt->fib6_protocol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005468
Xin Long22d0bd82018-09-11 14:33:58 +08005469 if (rt6_flags & RTF_CACHE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005470 rtm->rtm_flags |= RTM_F_CLONED;
5471
David Ahernd4ead6b2018-04-17 17:33:16 -07005472 if (dest) {
5473 if (nla_put_in6_addr(skb, RTA_DST, dest))
David S. Millerc78679e2012-04-01 20:27:33 -04005474 goto nla_put_failure;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09005475 rtm->rtm_dst_len = 128;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005476 } else if (rtm->rtm_dst_len)
Xin Long22d0bd82018-09-11 14:33:58 +08005477 if (nla_put_in6_addr(skb, RTA_DST, &rt6_dst->addr))
David S. Millerc78679e2012-04-01 20:27:33 -04005478 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005479#ifdef CONFIG_IPV6_SUBTREES
5480 if (src) {
Jiri Benc930345e2015-03-29 16:59:25 +02005481 if (nla_put_in6_addr(skb, RTA_SRC, src))
David S. Millerc78679e2012-04-01 20:27:33 -04005482 goto nla_put_failure;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09005483 rtm->rtm_src_len = 128;
David S. Millerc78679e2012-04-01 20:27:33 -04005484 } else if (rtm->rtm_src_len &&
Xin Long22d0bd82018-09-11 14:33:58 +08005485 nla_put_in6_addr(skb, RTA_SRC, &rt6_src->addr))
David S. Millerc78679e2012-04-01 20:27:33 -04005486 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005487#endif
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +09005488 if (iif) {
5489#ifdef CONFIG_IPV6_MROUTE
Xin Long22d0bd82018-09-11 14:33:58 +08005490 if (ipv6_addr_is_multicast(&rt6_dst->addr)) {
David Ahernfd61c6b2017-01-17 15:51:07 -08005491 int err = ip6mr_get_route(net, skb, rtm, portid);
Nikolay Aleksandrov2cf75072016-09-25 23:08:31 +02005492
David Ahernfd61c6b2017-01-17 15:51:07 -08005493 if (err == 0)
5494 return 0;
5495 if (err < 0)
5496 goto nla_put_failure;
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +09005497 } else
5498#endif
David S. Millerc78679e2012-04-01 20:27:33 -04005499 if (nla_put_u32(skb, RTA_IIF, iif))
5500 goto nla_put_failure;
David Ahernd4ead6b2018-04-17 17:33:16 -07005501 } else if (dest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005502 struct in6_addr saddr_buf;
David Ahernd4ead6b2018-04-17 17:33:16 -07005503 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
Jiri Benc930345e2015-03-29 16:59:25 +02005504 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
David S. Millerc78679e2012-04-01 20:27:33 -04005505 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005506 }
Thomas Graf2d7202b2006-08-22 00:01:27 -07005507
David Ahern93c2fb22018-04-18 15:38:59 -07005508 if (rt->fib6_prefsrc.plen) {
Daniel Walterc3968a82011-04-13 21:10:57 +00005509 struct in6_addr saddr_buf;
David Ahern93c2fb22018-04-18 15:38:59 -07005510 saddr_buf = rt->fib6_prefsrc.addr;
Jiri Benc930345e2015-03-29 16:59:25 +02005511 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
David S. Millerc78679e2012-04-01 20:27:33 -04005512 goto nla_put_failure;
Daniel Walterc3968a82011-04-13 21:10:57 +00005513 }
5514
David Ahernd4ead6b2018-04-17 17:33:16 -07005515 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
5516 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
Thomas Graf2d7202b2006-08-22 00:01:27 -07005517 goto nla_put_failure;
5518
David Ahern93c2fb22018-04-18 15:38:59 -07005519 if (nla_put_u32(skb, RTA_PRIORITY, rt->fib6_metric))
David S. Millerc78679e2012-04-01 20:27:33 -04005520 goto nla_put_failure;
Li Wei82539472012-07-29 16:01:30 +00005521
David Ahernbeb1afac52017-02-02 12:37:09 -08005522 /* For multipath routes, walk the siblings list and add
5523 * each as a nexthop within RTA_MULTIPATH.
5524 */
Xin Long22d0bd82018-09-11 14:33:58 +08005525 if (rt6) {
5526 if (rt6_flags & RTF_GATEWAY &&
5527 nla_put_in6_addr(skb, RTA_GATEWAY, &rt6->rt6i_gateway))
5528 goto nla_put_failure;
5529
5530 if (dst->dev && nla_put_u32(skb, RTA_OIF, dst->dev->ifindex))
5531 goto nla_put_failure;
5532 } else if (rt->fib6_nsiblings) {
David Ahern8d1c8022018-04-17 17:33:26 -07005533 struct fib6_info *sibling, *next_sibling;
David Ahernbeb1afac52017-02-02 12:37:09 -08005534 struct nlattr *mp;
5535
Michal Kubecekae0be8d2019-04-26 11:13:06 +02005536 mp = nla_nest_start_noflag(skb, RTA_MULTIPATH);
David Ahernbeb1afac52017-02-02 12:37:09 -08005537 if (!mp)
5538 goto nla_put_failure;
5539
David Ahern1cf844c2019-05-22 20:27:59 -07005540 if (fib_add_nexthop(skb, &rt->fib6_nh->nh_common,
Donald Sharp7bdf4de2019-09-04 10:11:58 -04005541 rt->fib6_nh->fib_nh_weight, AF_INET6) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08005542 goto nla_put_failure;
5543
5544 list_for_each_entry_safe(sibling, next_sibling,
David Ahern93c2fb22018-04-18 15:38:59 -07005545 &rt->fib6_siblings, fib6_siblings) {
David Ahern1cf844c2019-05-22 20:27:59 -07005546 if (fib_add_nexthop(skb, &sibling->fib6_nh->nh_common,
Donald Sharp7bdf4de2019-09-04 10:11:58 -04005547 sibling->fib6_nh->fib_nh_weight,
5548 AF_INET6) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08005549 goto nla_put_failure;
5550 }
5551
5552 nla_nest_end(skb, mp);
David Ahernf88d8ea2019-06-03 20:19:52 -07005553 } else if (rt->nh) {
5554 if (nla_put_u32(skb, RTA_NH_ID, rt->nh->id))
5555 goto nla_put_failure;
David Ahernecc56632019-04-23 08:48:09 -07005556
David Ahernf88d8ea2019-06-03 20:19:52 -07005557 if (nexthop_is_blackhole(rt->nh))
5558 rtm->rtm_type = RTN_BLACKHOLE;
5559
Roopa Prabhu4f801162020-04-27 13:56:46 -07005560 if (net->ipv4.sysctl_nexthop_compat_mode &&
5561 rt6_fill_node_nexthop(skb, rt->nh, &nh_flags) < 0)
David Ahernf88d8ea2019-06-03 20:19:52 -07005562 goto nla_put_failure;
5563
5564 rtm->rtm_flags |= nh_flags;
5565 } else {
Donald Sharp7bdf4de2019-09-04 10:11:58 -04005566 if (fib_nexthop_info(skb, &rt->fib6_nh->nh_common, AF_INET6,
David Ahernecc56632019-04-23 08:48:09 -07005567 &nh_flags, false) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08005568 goto nla_put_failure;
David Ahernecc56632019-04-23 08:48:09 -07005569
5570 rtm->rtm_flags |= nh_flags;
David Ahernbeb1afac52017-02-02 12:37:09 -08005571 }
5572
Xin Long22d0bd82018-09-11 14:33:58 +08005573 if (rt6_flags & RTF_EXPIRES) {
David Ahern14895682018-04-17 17:33:17 -07005574 expires = dst ? dst->expires : rt->expires;
5575 expires -= jiffies;
5576 }
YOSHIFUJI Hideaki69cdf8f2008-05-19 16:55:13 -07005577
Ido Schimmelbb3c4ab2020-01-14 13:23:12 +02005578 if (!dst) {
5579 if (rt->offload)
5580 rtm->rtm_flags |= RTM_F_OFFLOAD;
5581 if (rt->trap)
5582 rtm->rtm_flags |= RTM_F_TRAP;
5583 }
5584
David Ahernd4ead6b2018-04-17 17:33:16 -07005585 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
Thomas Grafe3703b32006-11-27 09:27:07 -08005586 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005587
Xin Long22d0bd82018-09-11 14:33:58 +08005588 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt6_flags)))
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01005589 goto nla_put_failure;
5590
Roopa Prabhu19e42e42015-07-21 10:43:48 +02005591
Johannes Berg053c0952015-01-16 22:09:00 +01005592 nlmsg_end(skb, nlh);
5593 return 0;
Thomas Graf2d7202b2006-08-22 00:01:27 -07005594
5595nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08005596 nlmsg_cancel(skb, nlh);
5597 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005598}
5599
David Ahern2c170e02019-06-08 14:53:27 -07005600static int fib6_info_nh_uses_dev(struct fib6_nh *nh, void *arg)
5601{
5602 const struct net_device *dev = arg;
5603
5604 if (nh->fib_nh_dev == dev)
5605 return 1;
5606
5607 return 0;
5608}
5609
David Ahern13e38902018-10-15 18:56:44 -07005610static bool fib6_info_uses_dev(const struct fib6_info *f6i,
5611 const struct net_device *dev)
5612{
David Ahern2c170e02019-06-08 14:53:27 -07005613 if (f6i->nh) {
5614 struct net_device *_dev = (struct net_device *)dev;
5615
5616 return !!nexthop_for_each_fib6_nh(f6i->nh,
5617 fib6_info_nh_uses_dev,
5618 _dev);
5619 }
5620
David Ahern1cf844c2019-05-22 20:27:59 -07005621 if (f6i->fib6_nh->fib_nh_dev == dev)
David Ahern13e38902018-10-15 18:56:44 -07005622 return true;
5623
5624 if (f6i->fib6_nsiblings) {
5625 struct fib6_info *sibling, *next_sibling;
5626
5627 list_for_each_entry_safe(sibling, next_sibling,
5628 &f6i->fib6_siblings, fib6_siblings) {
David Ahern1cf844c2019-05-22 20:27:59 -07005629 if (sibling->fib6_nh->fib_nh_dev == dev)
David Ahern13e38902018-10-15 18:56:44 -07005630 return true;
5631 }
5632 }
5633
5634 return false;
5635}
5636
Stefano Brivio1e47b482019-06-21 17:45:27 +02005637struct fib6_nh_exception_dump_walker {
5638 struct rt6_rtnl_dump_arg *dump;
5639 struct fib6_info *rt;
5640 unsigned int flags;
5641 unsigned int skip;
5642 unsigned int count;
5643};
5644
/* Dump the exception (cached) routes hanging off one fib6_nh as
 * RTM_NEWROUTE messages.  Returns 0 once the whole bucket array has been
 * processed, or the rt6_fill_node() error (e.g. -EMSGSIZE when the dump
 * skb is full) to stop the walk early.  w->count records how many entries
 * were handled so a later partial dump can resume via w->skip.
 */
static int rt6_nh_dump_exceptions(struct fib6_nh *nh, void *arg)
{
	struct fib6_nh_exception_dump_walker *w = arg;
	struct rt6_rtnl_dump_arg *dump = w->dump;
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i, err;

	/* No exception cache on this nexthop: nothing to dump. */
	bucket = fib6_nh_get_excptn_bucket(nh, NULL);
	if (!bucket)
		return 0;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			/* Resuming a partial dump: skip entries already sent */
			if (w->skip) {
				w->skip--;
				continue;
			}

			/* Expiration of entries doesn't bump sernum, insertion
			 * does. Removal is triggered by insertion, so we can
			 * rely on the fact that if entries change between two
			 * partial dumps, this node is scanned again completely,
			 * see rt6_insert_exception() and fib6_dump_table().
			 *
			 * Count expired entries we go through as handled
			 * entries that we'll skip next time, in case of partial
			 * node dump. Otherwise, if entries expire meanwhile,
			 * we'll skip the wrong amount.
			 */
			if (rt6_check_expired(rt6_ex->rt6i)) {
				w->count++;
				continue;
			}

			err = rt6_fill_node(dump->net, dump->skb, w->rt,
					    &rt6_ex->rt6i->dst, NULL, NULL, 0,
					    RTM_NEWROUTE,
					    NETLINK_CB(dump->cb->skb).portid,
					    dump->cb->nlh->nlmsg_seq, w->flags);
			if (err)
				return err;

			w->count++;
		}
		bucket++;	/* advance to the next hash bucket */
	}

	return 0;
}
5695
Stefano Briviobf9a8a0612019-06-21 17:45:26 +02005696/* Return -1 if done with node, number of handled routes on partial dump */
Stefano Brivio1e47b482019-06-21 17:45:27 +02005697int rt6_dump_route(struct fib6_info *rt, void *p_arg, unsigned int skip)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005698{
5699 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
David Ahern13e38902018-10-15 18:56:44 -07005700 struct fib_dump_filter *filter = &arg->filter;
5701 unsigned int flags = NLM_F_MULTI;
David Ahern1f17e2f2017-01-26 13:54:08 -08005702 struct net *net = arg->net;
Stefano Brivio1e47b482019-06-21 17:45:27 +02005703 int count = 0;
David Ahern1f17e2f2017-01-26 13:54:08 -08005704
David Ahern421842e2018-04-17 17:33:18 -07005705 if (rt == net->ipv6.fib6_null_entry)
Stefano Briviobf9a8a0612019-06-21 17:45:26 +02005706 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005707
David Ahern13e38902018-10-15 18:56:44 -07005708 if ((filter->flags & RTM_F_PREFIX) &&
5709 !(rt->fib6_flags & RTF_PREFIX_RT)) {
5710 /* success since this is not a prefix route */
Stefano Briviobf9a8a0612019-06-21 17:45:26 +02005711 return -1;
David Ahern13e38902018-10-15 18:56:44 -07005712 }
Stefano Brivio1e47b482019-06-21 17:45:27 +02005713 if (filter->filter_set &&
5714 ((filter->rt_type && rt->fib6_type != filter->rt_type) ||
5715 (filter->dev && !fib6_info_uses_dev(rt, filter->dev)) ||
5716 (filter->protocol && rt->fib6_protocol != filter->protocol))) {
5717 return -1;
5718 }
5719
5720 if (filter->filter_set ||
5721 !filter->dump_routes || !filter->dump_exceptions) {
David Ahern13e38902018-10-15 18:56:44 -07005722 flags |= NLM_F_DUMP_FILTERED;
David Ahernf8cfe2c2017-01-17 15:51:08 -08005723 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005724
Stefano Brivio1e47b482019-06-21 17:45:27 +02005725 if (filter->dump_routes) {
5726 if (skip) {
5727 skip--;
5728 } else {
5729 if (rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL,
5730 0, RTM_NEWROUTE,
5731 NETLINK_CB(arg->cb->skb).portid,
5732 arg->cb->nlh->nlmsg_seq, flags)) {
5733 return 0;
5734 }
5735 count++;
5736 }
5737 }
5738
5739 if (filter->dump_exceptions) {
5740 struct fib6_nh_exception_dump_walker w = { .dump = arg,
5741 .rt = rt,
5742 .flags = flags,
5743 .skip = skip,
5744 .count = 0 };
5745 int err;
5746
Eric Dumazet3b525692019-06-26 03:05:28 -07005747 rcu_read_lock();
Stefano Brivio1e47b482019-06-21 17:45:27 +02005748 if (rt->nh) {
5749 err = nexthop_for_each_fib6_nh(rt->nh,
5750 rt6_nh_dump_exceptions,
5751 &w);
5752 } else {
5753 err = rt6_nh_dump_exceptions(rt->fib6_nh, &w);
5754 }
Eric Dumazet3b525692019-06-26 03:05:28 -07005755 rcu_read_unlock();
Stefano Brivio1e47b482019-06-21 17:45:27 +02005756
5757 if (err)
5758 return count += w.count;
5759 }
Stefano Briviobf9a8a0612019-06-21 17:45:26 +02005760
5761 return -1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005762}
5763
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08005764static int inet6_rtm_valid_getroute_req(struct sk_buff *skb,
5765 const struct nlmsghdr *nlh,
5766 struct nlattr **tb,
5767 struct netlink_ext_ack *extack)
5768{
5769 struct rtmsg *rtm;
5770 int i, err;
5771
5772 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
5773 NL_SET_ERR_MSG_MOD(extack,
5774 "Invalid header for get route request");
5775 return -EINVAL;
5776 }
5777
5778 if (!netlink_strict_get_check(skb))
Johannes Berg8cb08172019-04-26 14:07:28 +02005779 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
5780 rtm_ipv6_policy, extack);
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08005781
5782 rtm = nlmsg_data(nlh);
5783 if ((rtm->rtm_src_len && rtm->rtm_src_len != 128) ||
5784 (rtm->rtm_dst_len && rtm->rtm_dst_len != 128) ||
5785 rtm->rtm_table || rtm->rtm_protocol || rtm->rtm_scope ||
5786 rtm->rtm_type) {
5787 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get route request");
5788 return -EINVAL;
5789 }
5790 if (rtm->rtm_flags & ~RTM_F_FIB_MATCH) {
5791 NL_SET_ERR_MSG_MOD(extack,
5792 "Invalid flags for get route request");
5793 return -EINVAL;
5794 }
5795
Johannes Berg8cb08172019-04-26 14:07:28 +02005796 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
5797 rtm_ipv6_policy, extack);
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08005798 if (err)
5799 return err;
5800
5801 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
5802 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
5803 NL_SET_ERR_MSG_MOD(extack, "rtm_src_len and rtm_dst_len must be 128 for IPv6");
5804 return -EINVAL;
5805 }
5806
5807 for (i = 0; i <= RTA_MAX; i++) {
5808 if (!tb[i])
5809 continue;
5810
5811 switch (i) {
5812 case RTA_SRC:
5813 case RTA_DST:
5814 case RTA_IIF:
5815 case RTA_OIF:
5816 case RTA_MARK:
5817 case RTA_UID:
5818 case RTA_SPORT:
5819 case RTA_DPORT:
5820 case RTA_IP_PROTO:
5821 break;
5822 default:
5823 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get route request");
5824 return -EINVAL;
5825 }
5826 }
5827
5828 return 0;
5829}
5830
David Ahernc21ef3e2017-04-16 09:48:24 -07005831static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5832 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005833{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09005834 struct net *net = sock_net(in_skb->sk);
Thomas Grafab364a62006-08-22 00:01:47 -07005835 struct nlattr *tb[RTA_MAX+1];
Roopa Prabhu18c3a612017-05-25 10:42:40 -07005836 int err, iif = 0, oif = 0;
David Aherna68886a2018-04-20 15:38:02 -07005837 struct fib6_info *from;
Roopa Prabhu18c3a612017-05-25 10:42:40 -07005838 struct dst_entry *dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005839 struct rt6_info *rt;
Thomas Grafab364a62006-08-22 00:01:47 -07005840 struct sk_buff *skb;
5841 struct rtmsg *rtm;
Maciej Żenczykowski744486d2018-09-29 23:44:54 -07005842 struct flowi6 fl6 = {};
Roopa Prabhu18c3a612017-05-25 10:42:40 -07005843 bool fibmatch;
Thomas Grafab364a62006-08-22 00:01:47 -07005844
Jakub Kicinski0eff0a22019-01-18 10:46:24 -08005845 err = inet6_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
Thomas Grafab364a62006-08-22 00:01:47 -07005846 if (err < 0)
5847 goto errout;
5848
5849 err = -EINVAL;
Hannes Frederic Sowa38b70972016-06-11 20:08:19 +02005850 rtm = nlmsg_data(nlh);
5851 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07005852 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
Thomas Grafab364a62006-08-22 00:01:47 -07005853
5854 if (tb[RTA_SRC]) {
5855 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
5856 goto errout;
5857
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00005858 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
Thomas Grafab364a62006-08-22 00:01:47 -07005859 }
5860
5861 if (tb[RTA_DST]) {
5862 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
5863 goto errout;
5864
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00005865 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
Thomas Grafab364a62006-08-22 00:01:47 -07005866 }
5867
5868 if (tb[RTA_IIF])
5869 iif = nla_get_u32(tb[RTA_IIF]);
5870
5871 if (tb[RTA_OIF])
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00005872 oif = nla_get_u32(tb[RTA_OIF]);
Thomas Grafab364a62006-08-22 00:01:47 -07005873
Lorenzo Colitti2e47b292014-05-15 16:38:41 -07005874 if (tb[RTA_MARK])
5875 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
5876
Lorenzo Colitti622ec2c2016-11-04 02:23:42 +09005877 if (tb[RTA_UID])
5878 fl6.flowi6_uid = make_kuid(current_user_ns(),
5879 nla_get_u32(tb[RTA_UID]));
5880 else
5881 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
5882
Roopa Prabhueacb9382018-05-22 14:03:28 -07005883 if (tb[RTA_SPORT])
5884 fl6.fl6_sport = nla_get_be16(tb[RTA_SPORT]);
5885
5886 if (tb[RTA_DPORT])
5887 fl6.fl6_dport = nla_get_be16(tb[RTA_DPORT]);
5888
5889 if (tb[RTA_IP_PROTO]) {
5890 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
Hangbin Liu5e1a99e2019-02-27 16:15:29 +08005891 &fl6.flowi6_proto, AF_INET6,
5892 extack);
Roopa Prabhueacb9382018-05-22 14:03:28 -07005893 if (err)
5894 goto errout;
5895 }
5896
Thomas Grafab364a62006-08-22 00:01:47 -07005897 if (iif) {
5898 struct net_device *dev;
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00005899 int flags = 0;
5900
Florian Westphal121622d2017-08-15 16:34:42 +02005901 rcu_read_lock();
5902
5903 dev = dev_get_by_index_rcu(net, iif);
Thomas Grafab364a62006-08-22 00:01:47 -07005904 if (!dev) {
Florian Westphal121622d2017-08-15 16:34:42 +02005905 rcu_read_unlock();
Thomas Grafab364a62006-08-22 00:01:47 -07005906 err = -ENODEV;
5907 goto errout;
5908 }
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00005909
5910 fl6.flowi6_iif = iif;
5911
5912 if (!ipv6_addr_any(&fl6.saddr))
5913 flags |= RT6_LOOKUP_F_HAS_SADDR;
5914
David Ahernb75cc8f2018-03-02 08:32:17 -08005915 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
Florian Westphal121622d2017-08-15 16:34:42 +02005916
5917 rcu_read_unlock();
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00005918 } else {
5919 fl6.flowi6_oif = oif;
5920
Ido Schimmel58acfd72017-12-20 12:28:25 +02005921 dst = ip6_route_output(net, NULL, &fl6);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07005922 }
5923
Roopa Prabhu18c3a612017-05-25 10:42:40 -07005924
5925 rt = container_of(dst, struct rt6_info, dst);
5926 if (rt->dst.error) {
5927 err = rt->dst.error;
5928 ip6_rt_put(rt);
5929 goto errout;
Thomas Grafab364a62006-08-22 00:01:47 -07005930 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005931
WANG Cong9d6acb32017-03-01 20:48:39 -08005932 if (rt == net->ipv6.ip6_null_entry) {
5933 err = rt->dst.error;
5934 ip6_rt_put(rt);
5935 goto errout;
5936 }
5937
Linus Torvalds1da177e2005-04-16 15:20:36 -07005938 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
David S. Miller38308472011-12-03 18:02:47 -05005939 if (!skb) {
Amerigo Wang94e187c2012-10-29 00:13:19 +00005940 ip6_rt_put(rt);
Thomas Grafab364a62006-08-22 00:01:47 -07005941 err = -ENOBUFS;
5942 goto errout;
5943 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07005944
Changli Gaod8d1f302010-06-10 23:31:35 -07005945 skb_dst_set(skb, &rt->dst);
David Aherna68886a2018-04-20 15:38:02 -07005946
5947 rcu_read_lock();
5948 from = rcu_dereference(rt->from);
Martin KaFai Lau886b7a52019-04-30 10:45:12 -07005949 if (from) {
5950 if (fibmatch)
5951 err = rt6_fill_node(net, skb, from, NULL, NULL, NULL,
5952 iif, RTM_NEWROUTE,
5953 NETLINK_CB(in_skb).portid,
5954 nlh->nlmsg_seq, 0);
5955 else
5956 err = rt6_fill_node(net, skb, from, dst, &fl6.daddr,
5957 &fl6.saddr, iif, RTM_NEWROUTE,
5958 NETLINK_CB(in_skb).portid,
5959 nlh->nlmsg_seq, 0);
5960 } else {
5961 err = -ENETUNREACH;
5962 }
David Aherna68886a2018-04-20 15:38:02 -07005963 rcu_read_unlock();
5964
Linus Torvalds1da177e2005-04-16 15:20:36 -07005965 if (err < 0) {
Thomas Grafab364a62006-08-22 00:01:47 -07005966 kfree_skb(skb);
5967 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005968 }
5969
Eric W. Biederman15e47302012-09-07 20:12:54 +00005970 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
Thomas Grafab364a62006-08-22 00:01:47 -07005971errout:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005972 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005973}
5974
David Ahern8d1c8022018-04-17 17:33:26 -07005975void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
Roopa Prabhu37a1d362015-09-13 10:18:33 -07005976 unsigned int nlm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005977{
5978 struct sk_buff *skb;
Daniel Lezcano55786892008-03-04 13:47:47 -08005979 struct net *net = info->nl_net;
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08005980 u32 seq;
5981 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005982
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08005983 err = -ENOBUFS;
David S. Miller38308472011-12-03 18:02:47 -05005984 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
Thomas Graf86872cb2006-08-22 00:01:08 -07005985
Roopa Prabhu19e42e42015-07-21 10:43:48 +02005986 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
David S. Miller38308472011-12-03 18:02:47 -05005987 if (!skb)
Thomas Graf21713eb2006-08-15 00:35:24 -07005988 goto errout;
5989
David Ahernd4ead6b2018-04-17 17:33:16 -07005990 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
5991 event, info->portid, seq, nlm_flags);
Patrick McHardy26932562007-01-31 23:16:40 -08005992 if (err < 0) {
5993 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
5994 WARN_ON(err == -EMSGSIZE);
5995 kfree_skb(skb);
5996 goto errout;
5997 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00005998 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08005999 info->nlh, gfp_any());
6000 return;
Thomas Graf21713eb2006-08-15 00:35:24 -07006001errout:
6002 if (err < 0)
Daniel Lezcano55786892008-03-04 13:47:47 -08006003 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006004}
6005
David Ahern19a3b7e2019-05-22 12:04:41 -07006006void fib6_rt_update(struct net *net, struct fib6_info *rt,
6007 struct nl_info *info)
6008{
6009 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
6010 struct sk_buff *skb;
6011 int err = -ENOBUFS;
6012
6013 /* call_fib6_entry_notifiers will be removed when in-kernel notifier
6014 * is implemented and supported for nexthop objects
6015 */
6016 call_fib6_entry_notifiers(net, FIB_EVENT_ENTRY_REPLACE, rt, NULL);
6017
6018 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
6019 if (!skb)
6020 goto errout;
6021
6022 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
6023 RTM_NEWROUTE, info->portid, seq, NLM_F_REPLACE);
6024 if (err < 0) {
6025 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
6026 WARN_ON(err == -EMSGSIZE);
6027 kfree_skb(skb);
6028 goto errout;
6029 }
6030 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
6031 info->nlh, gfp_any());
6032 return;
6033errout:
6034 if (err < 0)
6035 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
6036}
6037
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006038static int ip6_route_dev_notify(struct notifier_block *this,
Jiri Pirko351638e2013-05-28 01:30:21 +00006039 unsigned long event, void *ptr)
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006040{
Jiri Pirko351638e2013-05-28 01:30:21 +00006041 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09006042 struct net *net = dev_net(dev);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006043
WANG Cong242d3a42017-05-08 10:12:13 -07006044 if (!(dev->flags & IFF_LOOPBACK))
6045 return NOTIFY_OK;
6046
6047 if (event == NETDEV_REGISTER) {
David Ahern1cf844c2019-05-22 20:27:59 -07006048 net->ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = dev;
Changli Gaod8d1f302010-06-10 23:31:35 -07006049 net->ipv6.ip6_null_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006050 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
6051#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Changli Gaod8d1f302010-06-10 23:31:35 -07006052 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006053 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
Changli Gaod8d1f302010-06-10 23:31:35 -07006054 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006055 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
6056#endif
WANG Cong76da0702017-06-20 11:42:27 -07006057 } else if (event == NETDEV_UNREGISTER &&
6058 dev->reg_state != NETREG_UNREGISTERED) {
6059 /* NETDEV_UNREGISTER could be fired for multiple times by
6060 * netdev_wait_allrefs(). Make sure we only call this once.
6061 */
Eric Dumazet12d94a82017-08-15 04:09:51 -07006062 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07006063#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Eric Dumazet12d94a82017-08-15 04:09:51 -07006064 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
6065 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07006066#endif
Daniel Lezcano8ed67782008-03-04 13:48:30 -08006067 }
6068
6069 return NOTIFY_OK;
6070}
6071
Linus Torvalds1da177e2005-04-16 15:20:36 -07006072/*
6073 * /proc
6074 */
6075
6076#ifdef CONFIG_PROC_FS
Linus Torvalds1da177e2005-04-16 15:20:36 -07006077static int rt6_stats_seq_show(struct seq_file *seq, void *v)
6078{
Daniel Lezcano69ddb802008-03-04 13:46:23 -08006079 struct net *net = (struct net *)seq->private;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006080 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
Daniel Lezcano69ddb802008-03-04 13:46:23 -08006081 net->ipv6.rt6_stats->fib_nodes,
6082 net->ipv6.rt6_stats->fib_route_nodes,
Wei Wang81eb8442017-10-06 12:06:11 -07006083 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
Daniel Lezcano69ddb802008-03-04 13:46:23 -08006084 net->ipv6.rt6_stats->fib_rt_entries,
6085 net->ipv6.rt6_stats->fib_rt_cache,
Eric Dumazetfc66f952010-10-08 06:37:34 +00006086 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
Daniel Lezcano69ddb802008-03-04 13:46:23 -08006087 net->ipv6.rt6_stats->fib_discarded_routes);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006088
6089 return 0;
6090}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006091#endif /* CONFIG_PROC_FS */
6092
6093#ifdef CONFIG_SYSCTL
6094
Christoph Hellwig32927392020-04-24 08:43:38 +02006095static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
6096 void *buffer, size_t *lenp, loff_t *ppos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006097{
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00006098 struct net *net;
6099 int delay;
Aditya Pakkif0fb9b22018-12-24 10:30:17 -06006100 int ret;
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00006101 if (!write)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102 return -EINVAL;
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00006103
6104 net = (struct net *)ctl->extra1;
6105 delay = net->ipv6.sysctl.flush_delay;
Aditya Pakkif0fb9b22018-12-24 10:30:17 -06006106 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6107 if (ret)
6108 return ret;
6109
Michal Kubeček2ac3ac82013-08-01 10:04:14 +02006110 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00006111 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006112}
6113
David Aherned792e22018-10-08 14:06:34 -07006114static struct ctl_table ipv6_route_table_template[] = {
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09006115 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006116 .procname = "flush",
Daniel Lezcano49905092008-01-10 03:01:01 -08006117 .data = &init_net.ipv6.sysctl.flush_delay,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006118 .maxlen = sizeof(int),
Dave Jones89c8b3a12005-04-28 12:11:49 -07006119 .mode = 0200,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006120 .proc_handler = ipv6_sysctl_rtcache_flush
Linus Torvalds1da177e2005-04-16 15:20:36 -07006121 },
6122 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006123 .procname = "gc_thresh",
Daniel Lezcano9a7ec3a2008-03-04 13:48:53 -08006124 .data = &ip6_dst_ops_template.gc_thresh,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006125 .maxlen = sizeof(int),
6126 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006127 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006128 },
6129 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006130 .procname = "max_size",
Daniel Lezcano49905092008-01-10 03:01:01 -08006131 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006132 .maxlen = sizeof(int),
6133 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006134 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006135 },
6136 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006137 .procname = "gc_min_interval",
Daniel Lezcano49905092008-01-10 03:01:01 -08006138 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006139 .maxlen = sizeof(int),
6140 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006141 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006142 },
6143 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006144 .procname = "gc_timeout",
Daniel Lezcano49905092008-01-10 03:01:01 -08006145 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006146 .maxlen = sizeof(int),
6147 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006148 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006149 },
6150 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006151 .procname = "gc_interval",
Daniel Lezcano49905092008-01-10 03:01:01 -08006152 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006153 .maxlen = sizeof(int),
6154 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006155 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006156 },
6157 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006158 .procname = "gc_elasticity",
Daniel Lezcano49905092008-01-10 03:01:01 -08006159 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006160 .maxlen = sizeof(int),
6161 .mode = 0644,
Min Zhangf3d3f612010-08-14 22:42:51 -07006162 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006163 },
6164 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006165 .procname = "mtu_expires",
Daniel Lezcano49905092008-01-10 03:01:01 -08006166 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006167 .maxlen = sizeof(int),
6168 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006169 .proc_handler = proc_dointvec_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006170 },
6171 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006172 .procname = "min_adv_mss",
Daniel Lezcano49905092008-01-10 03:01:01 -08006173 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006174 .maxlen = sizeof(int),
6175 .mode = 0644,
Min Zhangf3d3f612010-08-14 22:42:51 -07006176 .proc_handler = proc_dointvec,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006177 },
6178 {
Linus Torvalds1da177e2005-04-16 15:20:36 -07006179 .procname = "gc_min_interval_ms",
Daniel Lezcano49905092008-01-10 03:01:01 -08006180 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006181 .maxlen = sizeof(int),
6182 .mode = 0644,
Alexey Dobriyan6d9f2392008-11-03 18:21:05 -08006183 .proc_handler = proc_dointvec_ms_jiffies,
Linus Torvalds1da177e2005-04-16 15:20:36 -07006184 },
David Ahern7c6bb7d2018-10-11 20:17:21 -07006185 {
6186 .procname = "skip_notify_on_dev_down",
6187 .data = &init_net.ipv6.sysctl.skip_notify_on_dev_down,
6188 .maxlen = sizeof(int),
6189 .mode = 0644,
Eiichi Tsukatab8e8a862019-06-25 12:08:01 +09006190 .proc_handler = proc_dointvec_minmax,
Matteo Croceeec48442019-07-18 15:58:50 -07006191 .extra1 = SYSCTL_ZERO,
6192 .extra2 = SYSCTL_ONE,
David Ahern7c6bb7d2018-10-11 20:17:21 -07006193 },
Eric W. Biedermanf8572d82009-11-05 13:32:03 -08006194 { }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006195};
6196
/* Duplicate the IPv6 route sysctl table template for one network
 * namespace and repoint each entry's ->data at that namespace's own
 * state, so writes through sysctl affect only that netns.
 *
 * Returns the kmemdup()'ed table, or NULL on allocation failure.
 * Ownership passes to the caller (presumably freed on netns sysctl
 * unregister — not visible in this file chunk).
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		/* NOTE: indices must stay in lockstep with the entry
		 * order of ipv6_route_table_template above.
		 */
		table[0].data = &net->ipv6.sysctl.flush_delay;
		/* flush_delay's handler needs the netns back-pointer */
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		/* gc_min_interval_ms shares the same backing variable
		 * as gc_min_interval, just with an ms-jiffies handler. */
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[10].data = &net->ipv6.sysctl.skip_notify_on_dev_down;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07006226#endif
6227
/* Per-network-namespace initialisation of the IPv6 routing core:
 * clone the dst_ops template, allocate the special template routes
 * (fib6_null, null, and — with policy routing — prohibit/blackhole),
 * and seed the per-netns sysctl defaults.
 *
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via the goto unwind chain below (each label frees
 * what was set up before its jump site, in reverse order).
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = fib6_info_alloc(GFP_KERNEL, true);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;
	memcpy(net->ipv6.fib6_null_entry, &fib6_null_entry_template,
	       sizeof(*net->ipv6.fib6_null_entry));

	/* "null" route: returned when no matching route exists */
	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_null_entry->rt6i_uncached);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_prohibit_entry->rt6i_uncached);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
	INIT_LIST_HEAD(&net->ipv6.ip6_blk_hole_entry->rt6i_uncached);
#ifdef CONFIG_IPV6_SUBTREES
	net->ipv6.fib6_routes_require_src = 0;
#endif
#endif

	/* Per-netns sysctl defaults (see ipv6_route_table_template) */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
	net->ipv6.sysctl.skip_notify_on_dev_down = 0;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

	/* Error unwind: reverse order of the allocations above */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
6309
/* Per-netns teardown counterpart of ip6_route_net_init(): free the
 * special template routes and destroy the dst entry accounting.
 * Must release exactly what ip6_route_net_init() allocated.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.fib6_null_entry);
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
6320
Thomas Grafd1896342012-06-18 12:08:33 +00006321static int __net_init ip6_route_net_init_late(struct net *net)
6322{
6323#ifdef CONFIG_PROC_FS
Christoph Hellwigc3506372018-04-10 19:42:55 +02006324 proc_create_net("ipv6_route", 0, net->proc_net, &ipv6_route_seq_ops,
6325 sizeof(struct ipv6_route_iter));
Christoph Hellwig3617d942018-04-13 20:38:35 +02006326 proc_create_net_single("rt6_stats", 0444, net->proc_net,
6327 rt6_stats_seq_show, NULL);
Thomas Grafd1896342012-06-18 12:08:33 +00006328#endif
6329 return 0;
6330}
6331
/* Late per-netns teardown: remove the proc entries created by
 * ip6_route_net_init_late().
 */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
6339
/* Pernet hooks for the IPv6 routing core (template routes, dst ops,
 * sysctl defaults). */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
6344
David S. Millerc3426b42012-06-09 16:27:05 -07006345static int __net_init ipv6_inetpeer_init(struct net *net)
6346{
6347 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
6348
6349 if (!bp)
6350 return -ENOMEM;
6351 inet_peer_base_init(bp);
6352 net->ipv6.peers = bp;
6353 return 0;
6354}
6355
6356static void __net_exit ipv6_inetpeer_exit(struct net *net)
6357{
6358 struct inet_peer_base *bp = net->ipv6.peers;
6359
6360 net->ipv6.peers = NULL;
David S. Miller56a6b242012-06-09 16:32:41 -07006361 inetpeer_invalidate_tree(bp);
David S. Millerc3426b42012-06-09 16:27:05 -07006362 kfree(bp);
6363}
6364
/* Pernet hooks for the per-namespace inet_peer base. */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};
6369
/* Pernet hooks run late (after the core routing state exists); they
 * only manage the proc entries. */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};
6374
/* Netdevice event notifier for the routing code; priority is set
 * below addrconf's so it runs after the addrconf notifier. */
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
6379
WANG Cong2f460932017-05-03 22:07:31 -07006380void __init ip6_route_init_special_entries(void)
6381{
6382 /* Registering of the loopback is done before this portion of code,
6383 * the loopback reference in rt6_info will not be taken, do it
6384 * manually for init_net */
David Ahern1cf844c2019-05-22 20:27:59 -07006385 init_net.ipv6.fib6_null_entry->fib6_nh->fib_nh_dev = init_net.loopback_dev;
WANG Cong2f460932017-05-03 22:07:31 -07006386 init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
6387 init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6388 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
6389 init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
6390 init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6391 init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
6392 init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
6393 #endif
6394}
6395
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
/* Declare the bpf iterator entry point for walking fib6_info routes. */
DEFINE_BPF_ITER_FUNC(ipv6_route, struct bpf_iter_meta *meta, struct fib6_info *rt)

/* Registration descriptor for the "ipv6_route" bpf iterator target;
 * it reuses the same seq_ops and iterator state as the
 * /proc/net/ipv6_route file. */
static const struct bpf_iter_reg ipv6_route_reg_info = {
	.target			= "ipv6_route",
	.seq_ops		= &ipv6_route_seq_ops,
	.init_seq_private	= bpf_iter_init_seq_net,
	.fini_seq_private	= bpf_iter_fini_seq_net,
	.seq_priv_size		= sizeof(struct ipv6_route_iter),
};

/* Register the "ipv6_route" iterator target with the bpf core;
 * called from ip6_route_init(). */
static int __init bpf_iter_register(void)
{
	return bpf_iter_reg_target(&ipv6_route_reg_info);
}

/* Undo bpf_iter_register(); called from ip6_route_cleanup(). */
static void bpf_iter_unregister(void)
{
	bpf_iter_unreg_target("ipv6_route");
}
#endif
#endif
6419
/* Module/built-in initialisation of the IPv6 routing subsystem.
 *
 * Sets up, strictly in order: the rt6_info slab cache, blackhole dst
 * accounting, the pernet subsystems (inetpeer, routing core, late
 * proc entries), fib6, xfrm6, fib6 rules, the rtnetlink route
 * handlers, the netdevice notifier, the optional bpf iterator, and
 * finally the per-CPU uncached-route lists.
 *
 * Returns 0 on success or a negative errno; any failure unwinds every
 * prior step through the mirrored goto chain at the bottom.
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* Blackhole dsts share the same slab cache as regular ones */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	/* rtnetlink handlers for route add/delete/query */
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	ret = bpf_iter_register();
	if (ret)
		goto out_register_late_subsys;
#endif
#endif

	/* Initialise the per-CPU lists of uncached routes */
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

	/* Error unwind: reverse order of the registrations above.
	 * rtnl_unregister_all() covers all three rtnl handlers, so the
	 * three registration failures share one label. */
out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
6519
/* Tear down everything ip6_route_init() set up, in reverse
 * registration order.
 */
void ip6_route_cleanup(void)
{
#if IS_BUILTIN(CONFIG_IPV6)
#if defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_PROC_FS)
	bpf_iter_unregister();
#endif
#endif
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}