blob: 7c141394d4f10d311e4f97d3a22babf7d786f3f7 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * Linux INET6 implementation
3 * FIB front-end.
4 *
5 * Authors:
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09006 * Pedro Roque <roque@di.fc.ul.pt>
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07008 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14/* Changes:
15 *
16 * YOSHIFUJI Hideaki @USAGI
17 * reworked default router selection.
18 * - respect outgoing interface
19 * - select from (probably) reachable routers (i.e.
20 * routers in REACHABLE, STALE, DELAY or PROBE states).
21 * - always select the same router if it is (probably)
22 * reachable. otherwise, round-robin the list.
YOSHIFUJI Hideakic0bece92006-08-23 17:23:25 -070023 * Ville Nuorvala
24 * Fixed routing subtrees.
Linus Torvalds1da177e2005-04-16 15:20:36 -070025 */
26
Joe Perchesf3213832012-05-15 14:11:53 +000027#define pr_fmt(fmt) "IPv6: " fmt
28
Randy Dunlap4fc268d2006-01-11 12:17:47 -080029#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030#include <linux/errno.h>
Paul Gortmakerbc3b2d72011-07-15 11:47:34 -040031#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032#include <linux/types.h>
33#include <linux/times.h>
34#include <linux/socket.h>
35#include <linux/sockios.h>
36#include <linux/net.h>
37#include <linux/route.h>
38#include <linux/netdevice.h>
39#include <linux/in6.h>
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +090040#include <linux/mroute6.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070041#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/if_arp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/proc_fs.h>
44#include <linux/seq_file.h>
Daniel Lezcano5b7c9312008-03-03 23:28:58 -080045#include <linux/nsproxy.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090046#include <linux/slab.h>
Wei Wang35732d02017-10-06 12:05:57 -070047#include <linux/jhash.h>
Eric W. Biederman457c4cb2007-09-12 12:01:34 +020048#include <net/net_namespace.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <net/snmp.h>
50#include <net/ipv6.h>
51#include <net/ip6_fib.h>
52#include <net/ip6_route.h>
53#include <net/ndisc.h>
54#include <net/addrconf.h>
55#include <net/tcp.h>
56#include <linux/rtnetlink.h>
57#include <net/dst.h>
Jiri Benc904af042015-08-20 13:56:31 +020058#include <net/dst_metadata.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059#include <net/xfrm.h>
Tom Tucker8d717402006-07-30 20:43:36 -070060#include <net/netevent.h>
Thomas Graf21713eb2006-08-15 00:35:24 -070061#include <net/netlink.h>
Nicolas Dichtel51ebd312012-10-22 03:42:09 +000062#include <net/nexthop.h>
Roopa Prabhu19e42e42015-07-21 10:43:48 +020063#include <net/lwtunnel.h>
Jiri Benc904af042015-08-20 13:56:31 +020064#include <net/ip_tunnels.h>
David Ahernca254492015-10-12 11:47:10 -070065#include <net/l3mdev.h>
David Ahernb8115802015-11-19 12:24:22 -080066#include <trace/events/fib6.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070067
Linus Torvalds7c0f6ba2016-12-24 11:46:01 -080068#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070069
70#ifdef CONFIG_SYSCTL
71#include <linux/sysctl.h>
72#endif
73
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +020074enum rt6_nud_state {
Jiri Benc7e980562013-12-11 13:48:20 +010075 RT6_NUD_FAIL_HARD = -3,
76 RT6_NUD_FAIL_PROBE = -2,
77 RT6_NUD_FAIL_DO_RR = -1,
Hannes Frederic Sowaafc154e2013-07-11 12:43:42 +020078 RT6_NUD_SUCCEED = 1
79};
80
Linus Torvalds1da177e2005-04-16 15:20:36 -070081static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
David S. Miller0dbaee32010-12-13 12:52:14 -080082static unsigned int ip6_default_advmss(const struct dst_entry *dst);
Steffen Klassertebb762f2011-11-23 02:12:51 +000083static unsigned int ip6_mtu(const struct dst_entry *dst);
Linus Torvalds1da177e2005-04-16 15:20:36 -070084static struct dst_entry *ip6_negative_advice(struct dst_entry *);
85static void ip6_dst_destroy(struct dst_entry *);
86static void ip6_dst_ifdown(struct dst_entry *,
87 struct net_device *dev, int how);
Daniel Lezcano569d3642008-01-18 03:56:57 -080088static int ip6_dst_gc(struct dst_ops *ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -070089
90static int ip6_pkt_discard(struct sk_buff *skb);
Eric W. Biedermanede20592015-10-07 16:48:47 -050091static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
Kamala R7150aed2013-12-02 19:55:21 +053092static int ip6_pkt_prohibit(struct sk_buff *skb);
Eric W. Biedermanede20592015-10-07 16:48:47 -050093static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -070094static void ip6_link_failure(struct sk_buff *skb);
David S. Miller6700c272012-07-17 03:29:28 -070095static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
96 struct sk_buff *skb, u32 mtu);
97static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
98 struct sk_buff *skb);
Nicolas Dichtel52bd4c02013-06-28 17:35:48 +020099static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
David Ahern16a16cd2017-02-02 12:37:11 -0800100static size_t rt6_nlmsg_size(struct rt6_info *rt);
David Ahernd4ead6b2018-04-17 17:33:16 -0700101static int rt6_fill_node(struct net *net, struct sk_buff *skb,
102 struct rt6_info *rt, struct dst_entry *dst,
103 struct in6_addr *dest, struct in6_addr *src,
David Ahern16a16cd2017-02-02 12:37:11 -0800104 int iif, int type, u32 portid, u32 seq,
105 unsigned int flags);
Wei Wang35732d02017-10-06 12:05:57 -0700106static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
107 struct in6_addr *daddr,
108 struct in6_addr *saddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800110#ifdef CONFIG_IPV6_ROUTE_INFO
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -0800111static struct rt6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000112 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -0700113 const struct in6_addr *gwaddr,
114 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +0000115 unsigned int pref);
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -0800116static struct rt6_info *rt6_get_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000117 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -0700118 const struct in6_addr *gwaddr,
119 struct net_device *dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -0800120#endif
121
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700122struct uncached_list {
123 spinlock_t lock;
124 struct list_head head;
125};
126
127static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
128
Xin Long510c3212018-02-14 19:06:02 +0800129void rt6_uncached_list_add(struct rt6_info *rt)
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700130{
131 struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);
132
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700133 rt->rt6i_uncached_list = ul;
134
135 spin_lock_bh(&ul->lock);
136 list_add_tail(&rt->rt6i_uncached, &ul->head);
137 spin_unlock_bh(&ul->lock);
138}
139
Xin Long510c3212018-02-14 19:06:02 +0800140void rt6_uncached_list_del(struct rt6_info *rt)
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700141{
142 if (!list_empty(&rt->rt6i_uncached)) {
143 struct uncached_list *ul = rt->rt6i_uncached_list;
Wei Wang81eb8442017-10-06 12:06:11 -0700144 struct net *net = dev_net(rt->dst.dev);
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700145
146 spin_lock_bh(&ul->lock);
147 list_del(&rt->rt6i_uncached);
Wei Wang81eb8442017-10-06 12:06:11 -0700148 atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700149 spin_unlock_bh(&ul->lock);
150 }
151}
152
153static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
154{
155 struct net_device *loopback_dev = net->loopback_dev;
156 int cpu;
157
Eric W. Biedermane332bc62015-10-12 11:02:08 -0500158 if (dev == loopback_dev)
159 return;
160
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700161 for_each_possible_cpu(cpu) {
162 struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
163 struct rt6_info *rt;
164
165 spin_lock_bh(&ul->lock);
166 list_for_each_entry(rt, &ul->head, rt6i_uncached) {
167 struct inet6_dev *rt_idev = rt->rt6i_idev;
168 struct net_device *rt_dev = rt->dst.dev;
169
Eric W. Biedermane332bc62015-10-12 11:02:08 -0500170 if (rt_idev->dev == dev) {
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700171 rt->rt6i_idev = in6_dev_get(loopback_dev);
172 in6_dev_put(rt_idev);
173 }
174
Eric W. Biedermane332bc62015-10-12 11:02:08 -0500175 if (rt_dev == dev) {
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700176 rt->dst.dev = loopback_dev;
177 dev_hold(rt->dst.dev);
178 dev_put(rt_dev);
179 }
180 }
181 spin_unlock_bh(&ul->lock);
182 }
183}
184
David S. Millerf894cbf2012-07-02 21:52:24 -0700185static inline const void *choose_neigh_daddr(struct rt6_info *rt,
186 struct sk_buff *skb,
187 const void *daddr)
David S. Miller39232972012-01-26 15:22:32 -0500188{
189 struct in6_addr *p = &rt->rt6i_gateway;
190
David S. Millera7563f32012-01-26 16:29:16 -0500191 if (!ipv6_addr_any(p))
David S. Miller39232972012-01-26 15:22:32 -0500192 return (const void *) p;
David S. Millerf894cbf2012-07-02 21:52:24 -0700193 else if (skb)
194 return &ipv6_hdr(skb)->daddr;
David S. Miller39232972012-01-26 15:22:32 -0500195 return daddr;
196}
197
David S. Millerf894cbf2012-07-02 21:52:24 -0700198static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
199 struct sk_buff *skb,
200 const void *daddr)
David S. Millerd3aaeb32011-07-18 00:40:17 -0700201{
David S. Miller39232972012-01-26 15:22:32 -0500202 struct rt6_info *rt = (struct rt6_info *) dst;
203 struct neighbour *n;
204
David S. Millerf894cbf2012-07-02 21:52:24 -0700205 daddr = choose_neigh_daddr(rt, skb, daddr);
YOSHIFUJI Hideaki / 吉藤英明8e022ee2013-01-17 12:53:09 +0000206 n = __ipv6_neigh_lookup(dst->dev, daddr);
David S. Millerf83c7792011-12-28 15:41:23 -0500207 if (n)
208 return n;
209 return neigh_create(&nd_tbl, daddr, dst->dev);
210}
211
Julian Anastasov63fca652017-02-06 23:14:15 +0200212static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
213{
214 struct net_device *dev = dst->dev;
215 struct rt6_info *rt = (struct rt6_info *)dst;
216
217 daddr = choose_neigh_daddr(rt, NULL, daddr);
218 if (!daddr)
219 return;
220 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
221 return;
222 if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
223 return;
224 __ipv6_confirm_neigh(dev, daddr);
225}
226
Daniel Lezcano9a7ec3a2008-03-04 13:48:53 -0800227static struct dst_ops ip6_dst_ops_template = {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700228 .family = AF_INET6,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700229 .gc = ip6_dst_gc,
230 .gc_thresh = 1024,
231 .check = ip6_dst_check,
David S. Miller0dbaee32010-12-13 12:52:14 -0800232 .default_advmss = ip6_default_advmss,
Steffen Klassertebb762f2011-11-23 02:12:51 +0000233 .mtu = ip6_mtu,
David Ahernd4ead6b2018-04-17 17:33:16 -0700234 .cow_metrics = dst_cow_metrics_generic,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 .destroy = ip6_dst_destroy,
236 .ifdown = ip6_dst_ifdown,
237 .negative_advice = ip6_negative_advice,
238 .link_failure = ip6_link_failure,
239 .update_pmtu = ip6_rt_update_pmtu,
David S. Miller6e157b62012-07-12 00:05:02 -0700240 .redirect = rt6_do_redirect,
Eric W. Biederman9f8955c2015-10-07 16:48:39 -0500241 .local_out = __ip6_local_out,
David S. Millerd3aaeb32011-07-18 00:40:17 -0700242 .neigh_lookup = ip6_neigh_lookup,
Julian Anastasov63fca652017-02-06 23:14:15 +0200243 .confirm_neigh = ip6_confirm_neigh,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244};
245
Steffen Klassertebb762f2011-11-23 02:12:51 +0000246static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
Roland Dreierec831ea2011-01-31 13:16:00 -0800247{
Steffen Klassert618f9bc2011-11-23 02:13:31 +0000248 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
249
250 return mtu ? : dst->dev->mtu;
Roland Dreierec831ea2011-01-31 13:16:00 -0800251}
252
David S. Miller6700c272012-07-17 03:29:28 -0700253static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
254 struct sk_buff *skb, u32 mtu)
David S. Miller14e50e52007-05-24 18:17:54 -0700255{
256}
257
David S. Miller6700c272012-07-17 03:29:28 -0700258static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
259 struct sk_buff *skb)
David S. Millerb587ee32012-07-12 00:39:24 -0700260{
261}
262
David S. Miller14e50e52007-05-24 18:17:54 -0700263static struct dst_ops ip6_dst_blackhole_ops = {
264 .family = AF_INET6,
David S. Miller14e50e52007-05-24 18:17:54 -0700265 .destroy = ip6_dst_destroy,
266 .check = ip6_dst_check,
Steffen Klassertebb762f2011-11-23 02:12:51 +0000267 .mtu = ip6_blackhole_mtu,
Eric Dumazet214f45c2011-02-18 11:39:01 -0800268 .default_advmss = ip6_default_advmss,
David S. Miller14e50e52007-05-24 18:17:54 -0700269 .update_pmtu = ip6_rt_blackhole_update_pmtu,
David S. Millerb587ee32012-07-12 00:39:24 -0700270 .redirect = ip6_rt_blackhole_redirect,
Martin KaFai Lau0a1f5962015-10-15 16:39:58 -0700271 .cow_metrics = dst_cow_metrics_generic,
David S. Millerd3aaeb32011-07-18 00:40:17 -0700272 .neigh_lookup = ip6_neigh_lookup,
David S. Miller14e50e52007-05-24 18:17:54 -0700273};
274
David S. Miller62fa8a82011-01-26 20:51:05 -0800275static const u32 ip6_template_metrics[RTAX_MAX] = {
Li RongQing14edd872012-10-24 14:01:18 +0800276 [RTAX_HOPLIMIT - 1] = 0,
David S. Miller62fa8a82011-01-26 20:51:05 -0800277};
278
David Ahern421842e2018-04-17 17:33:18 -0700279static const struct rt6_info fib6_null_entry_template = {
280 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
281 .rt6i_protocol = RTPROT_KERNEL,
282 .rt6i_metric = ~(u32)0,
283 .rt6i_ref = ATOMIC_INIT(1),
284 .fib6_type = RTN_UNREACHABLE,
285 .fib6_metrics = (struct dst_metrics *)&dst_default_metrics,
286};
287
Eric Dumazetfb0af4c2012-09-11 21:47:51 +0000288static const struct rt6_info ip6_null_entry_template = {
Changli Gaod8d1f302010-06-10 23:31:35 -0700289 .dst = {
290 .__refcnt = ATOMIC_INIT(1),
291 .__use = 1,
Nicolas Dichtel2c20cbd2012-09-10 22:09:47 +0000292 .obsolete = DST_OBSOLETE_FORCE_CHK,
Changli Gaod8d1f302010-06-10 23:31:35 -0700293 .error = -ENETUNREACH,
Changli Gaod8d1f302010-06-10 23:31:35 -0700294 .input = ip6_pkt_discard,
295 .output = ip6_pkt_discard_out,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700296 },
297 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
Jean-Mickael Guerin4f724272009-05-20 17:38:59 -0700298 .rt6i_protocol = RTPROT_KERNEL,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299 .rt6i_metric = ~(u32) 0,
300 .rt6i_ref = ATOMIC_INIT(1),
David Aherne8478e82018-04-17 17:33:13 -0700301 .fib6_type = RTN_UNREACHABLE,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302};
303
Thomas Graf101367c2006-08-04 03:39:02 -0700304#ifdef CONFIG_IPV6_MULTIPLE_TABLES
305
Eric Dumazetfb0af4c2012-09-11 21:47:51 +0000306static const struct rt6_info ip6_prohibit_entry_template = {
Changli Gaod8d1f302010-06-10 23:31:35 -0700307 .dst = {
308 .__refcnt = ATOMIC_INIT(1),
309 .__use = 1,
Nicolas Dichtel2c20cbd2012-09-10 22:09:47 +0000310 .obsolete = DST_OBSOLETE_FORCE_CHK,
Changli Gaod8d1f302010-06-10 23:31:35 -0700311 .error = -EACCES,
Changli Gaod8d1f302010-06-10 23:31:35 -0700312 .input = ip6_pkt_prohibit,
313 .output = ip6_pkt_prohibit_out,
Thomas Graf101367c2006-08-04 03:39:02 -0700314 },
315 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
Jean-Mickael Guerin4f724272009-05-20 17:38:59 -0700316 .rt6i_protocol = RTPROT_KERNEL,
Thomas Graf101367c2006-08-04 03:39:02 -0700317 .rt6i_metric = ~(u32) 0,
318 .rt6i_ref = ATOMIC_INIT(1),
David Aherne8478e82018-04-17 17:33:13 -0700319 .fib6_type = RTN_PROHIBIT,
Thomas Graf101367c2006-08-04 03:39:02 -0700320};
321
Eric Dumazetfb0af4c2012-09-11 21:47:51 +0000322static const struct rt6_info ip6_blk_hole_entry_template = {
Changli Gaod8d1f302010-06-10 23:31:35 -0700323 .dst = {
324 .__refcnt = ATOMIC_INIT(1),
325 .__use = 1,
Nicolas Dichtel2c20cbd2012-09-10 22:09:47 +0000326 .obsolete = DST_OBSOLETE_FORCE_CHK,
Changli Gaod8d1f302010-06-10 23:31:35 -0700327 .error = -EINVAL,
Changli Gaod8d1f302010-06-10 23:31:35 -0700328 .input = dst_discard,
Eric W. Biedermanede20592015-10-07 16:48:47 -0500329 .output = dst_discard_out,
Thomas Graf101367c2006-08-04 03:39:02 -0700330 },
331 .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP),
Jean-Mickael Guerin4f724272009-05-20 17:38:59 -0700332 .rt6i_protocol = RTPROT_KERNEL,
Thomas Graf101367c2006-08-04 03:39:02 -0700333 .rt6i_metric = ~(u32) 0,
334 .rt6i_ref = ATOMIC_INIT(1),
David Aherne8478e82018-04-17 17:33:13 -0700335 .fib6_type = RTN_BLACKHOLE,
Thomas Graf101367c2006-08-04 03:39:02 -0700336};
337
338#endif
339
Martin KaFai Lauebfa45f2015-10-15 16:39:57 -0700340static void rt6_info_init(struct rt6_info *rt)
341{
342 struct dst_entry *dst = &rt->dst;
343
344 memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
345 INIT_LIST_HEAD(&rt->rt6i_siblings);
346 INIT_LIST_HEAD(&rt->rt6i_uncached);
David Ahernd4ead6b2018-04-17 17:33:16 -0700347 rt->fib6_metrics = (struct dst_metrics *)&dst_default_metrics;
Martin KaFai Lauebfa45f2015-10-15 16:39:57 -0700348}
349
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350/* allocate dst with ip6_dst_ops */
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700351static struct rt6_info *__ip6_dst_alloc(struct net *net,
352 struct net_device *dev,
Martin KaFai Lauad706862015-08-14 11:05:52 -0700353 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354{
David S. Miller97bab732012-06-09 22:36:36 -0700355 struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
Wei Wangb2a9c0e2017-06-17 10:42:41 -0700356 1, DST_OBSOLETE_FORCE_CHK, flags);
David S. Millercf911662011-04-28 14:31:47 -0700357
Wei Wang81eb8442017-10-06 12:06:11 -0700358 if (rt) {
Martin KaFai Lauebfa45f2015-10-15 16:39:57 -0700359 rt6_info_init(rt);
Wei Wang81eb8442017-10-06 12:06:11 -0700360 atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
361 }
Steffen Klassert81048912012-07-05 23:37:09 +0000362
David S. Millercf911662011-04-28 14:31:47 -0700363 return rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364}
365
David Ahern9ab179d2016-04-07 11:10:06 -0700366struct rt6_info *ip6_dst_alloc(struct net *net,
367 struct net_device *dev,
368 int flags)
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700369{
Martin KaFai Lauad706862015-08-14 11:05:52 -0700370 struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700371
372 if (rt) {
373 rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
Eric Dumazetbfd8e5a2017-10-09 06:01:37 -0700374 if (!rt->rt6i_pcpu) {
Wei Wang587fea72017-06-17 10:42:36 -0700375 dst_release_immediate(&rt->dst);
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700376 return NULL;
377 }
378 }
379
380 return rt;
381}
David Ahern9ab179d2016-04-07 11:10:06 -0700382EXPORT_SYMBOL(ip6_dst_alloc);
Martin KaFai Laud52d3992015-05-22 20:56:06 -0700383
Linus Torvalds1da177e2005-04-16 15:20:36 -0700384static void ip6_dst_destroy(struct dst_entry *dst)
385{
386 struct rt6_info *rt = (struct rt6_info *)dst;
Wei Wang35732d02017-10-06 12:05:57 -0700387 struct rt6_exception_bucket *bucket;
David Miller3a2232e2017-11-28 15:40:40 -0500388 struct rt6_info *from = rt->from;
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700389 struct inet6_dev *idev;
David Ahernd4ead6b2018-04-17 17:33:16 -0700390 struct dst_metrics *m;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700391
Martin KaFai Lau4b32b5a2015-04-28 13:03:06 -0700392 dst_destroy_metrics_generic(dst);
Markus Elfring87775312015-07-02 16:30:24 +0200393 free_percpu(rt->rt6i_pcpu);
Martin KaFai Lau8d0b94a2015-05-22 20:56:04 -0700394 rt6_uncached_list_del(rt);
395
396 idev = rt->rt6i_idev;
David S. Miller38308472011-12-03 18:02:47 -0500397 if (idev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398 rt->rt6i_idev = NULL;
399 in6_dev_put(idev);
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +0900400 }
Wei Wang35732d02017-10-06 12:05:57 -0700401 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
402 if (bucket) {
403 rt->rt6i_exception_bucket = NULL;
404 kfree(bucket);
405 }
Gao feng1716a962012-04-06 00:13:10 +0000406
David Ahernd4ead6b2018-04-17 17:33:16 -0700407 m = rt->fib6_metrics;
408 if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
409 kfree(m);
410
David Miller3a2232e2017-11-28 15:40:40 -0500411 rt->from = NULL;
412 dst_release(&from->dst);
David S. Millerb3419362010-11-30 12:27:11 -0800413}
414
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
416 int how)
417{
418 struct rt6_info *rt = (struct rt6_info *)dst;
419 struct inet6_dev *idev = rt->rt6i_idev;
Denis V. Lunev5a3e55d2007-12-07 00:38:10 -0800420 struct net_device *loopback_dev =
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +0900421 dev_net(dev)->loopback_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422
Wei Wange5645f52017-08-14 10:44:59 -0700423 if (idev && idev->dev != loopback_dev) {
424 struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
425 if (loopback_idev) {
426 rt->rt6i_idev = loopback_idev;
427 in6_dev_put(idev);
David S. Miller97cac082012-07-02 22:43:47 -0700428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 }
430}
431
Martin KaFai Lau5973fb12015-11-11 11:51:07 -0800432static bool __rt6_check_expired(const struct rt6_info *rt)
433{
434 if (rt->rt6i_flags & RTF_EXPIRES)
435 return time_after(jiffies, rt->dst.expires);
436 else
437 return false;
438}
439
Eric Dumazeta50feda2012-05-18 18:57:34 +0000440static bool rt6_check_expired(const struct rt6_info *rt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700441{
Gao feng1716a962012-04-06 00:13:10 +0000442 if (rt->rt6i_flags & RTF_EXPIRES) {
443 if (time_after(jiffies, rt->dst.expires))
Eric Dumazeta50feda2012-05-18 18:57:34 +0000444 return true;
David Miller3a2232e2017-11-28 15:40:40 -0500445 } else if (rt->from) {
Xin Long1e2ea8a2017-08-26 20:10:10 +0800446 return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
David Ahern14895682018-04-17 17:33:17 -0700447 fib6_check_expired(rt->from);
Gao feng1716a962012-04-06 00:13:10 +0000448 }
Eric Dumazeta50feda2012-05-18 18:57:34 +0000449 return false;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700450}
451
David Ahernb4bac172018-03-02 08:32:18 -0800452static struct rt6_info *rt6_multipath_select(const struct net *net,
453 struct rt6_info *match,
Nicolas Dichtel52bd4c02013-06-28 17:35:48 +0200454 struct flowi6 *fl6, int oif,
David Ahernb75cc8f2018-03-02 08:32:17 -0800455 const struct sk_buff *skb,
Nicolas Dichtel52bd4c02013-06-28 17:35:48 +0200456 int strict)
Nicolas Dichtel51ebd312012-10-22 03:42:09 +0000457{
458 struct rt6_info *sibling, *next_sibling;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +0000459
Jakub Sitnickib673d6c2017-08-23 09:58:31 +0200460 /* We might have already computed the hash for ICMPv6 errors. In such
461 * case it will always be non-zero. Otherwise now is the time to do it.
462 */
463 if (!fl6->mp_hash)
David Ahernb4bac172018-03-02 08:32:18 -0800464 fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);
Jakub Sitnickib673d6c2017-08-23 09:58:31 +0200465
David Ahern5e670d82018-04-17 17:33:14 -0700466 if (fl6->mp_hash <= atomic_read(&match->fib6_nh.nh_upper_bound))
Ido Schimmel3d709f62018-01-09 16:40:27 +0200467 return match;
Ido Schimmelbbfcd772017-11-21 09:50:12 +0200468
Ido Schimmel3d709f62018-01-09 16:40:27 +0200469 list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings,
470 rt6i_siblings) {
David Ahern5e670d82018-04-17 17:33:14 -0700471 int nh_upper_bound;
472
473 nh_upper_bound = atomic_read(&sibling->fib6_nh.nh_upper_bound);
474 if (fl6->mp_hash > nh_upper_bound)
Ido Schimmel3d709f62018-01-09 16:40:27 +0200475 continue;
476 if (rt6_score_route(sibling, oif, strict) < 0)
477 break;
478 match = sibling;
479 break;
480 }
481
Nicolas Dichtel51ebd312012-10-22 03:42:09 +0000482 return match;
483}
484
Linus Torvalds1da177e2005-04-16 15:20:36 -0700485/*
Wei Wang66f5d6c2017-10-06 12:06:10 -0700486 * Route lookup. rcu_read_lock() should be held.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700487 */
488
Daniel Lezcano8ed67782008-03-04 13:48:30 -0800489static inline struct rt6_info *rt6_device_match(struct net *net,
490 struct rt6_info *rt,
Eric Dumazetb71d1d42011-04-22 04:53:02 +0000491 const struct in6_addr *saddr,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700492 int oif,
YOSHIFUJI Hideakid4208952008-06-27 20:14:54 -0700493 int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494{
495 struct rt6_info *local = NULL;
496 struct rt6_info *sprt;
497
David Ahern5e670d82018-04-17 17:33:14 -0700498 if (!oif && ipv6_addr_any(saddr) &&
499 !(rt->fib6_nh.nh_flags & RTNH_F_DEAD))
Ido Schimmel8067bb82018-01-07 12:45:09 +0200500 return rt;
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900501
David Miller071fb372017-11-28 15:40:15 -0500502 for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
David Ahern5e670d82018-04-17 17:33:14 -0700503 const struct net_device *dev = sprt->fib6_nh.nh_dev;
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900504
David Ahern5e670d82018-04-17 17:33:14 -0700505 if (sprt->fib6_nh.nh_flags & RTNH_F_DEAD)
Ido Schimmel8067bb82018-01-07 12:45:09 +0200506 continue;
507
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900508 if (oif) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509 if (dev->ifindex == oif)
510 return sprt;
511 if (dev->flags & IFF_LOOPBACK) {
David S. Miller38308472011-12-03 18:02:47 -0500512 if (!sprt->rt6i_idev ||
Linus Torvalds1da177e2005-04-16 15:20:36 -0700513 sprt->rt6i_idev->dev->ifindex != oif) {
David Ahern17fb0b22015-09-25 15:22:54 -0600514 if (flags & RT6_LOOKUP_F_IFACE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700515 continue;
David Ahern17fb0b22015-09-25 15:22:54 -0600516 if (local &&
517 local->rt6i_idev->dev->ifindex == oif)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700518 continue;
519 }
520 local = sprt;
521 }
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900522 } else {
523 if (ipv6_chk_addr(net, saddr, dev,
524 flags & RT6_LOOKUP_F_IFACE))
525 return sprt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526 }
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900527 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700528
YOSHIFUJI Hideakidd3abc42008-07-02 18:30:18 +0900529 if (oif) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530 if (local)
531 return local;
532
YOSHIFUJI Hideakid4208952008-06-27 20:14:54 -0700533 if (flags & RT6_LOOKUP_F_IFACE)
David Ahern421842e2018-04-17 17:33:18 -0700534 return net->ipv6.fib6_null_entry;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700535 }
Ido Schimmel8067bb82018-01-07 12:45:09 +0200536
David Ahern421842e2018-04-17 17:33:18 -0700537 return rt->fib6_nh.nh_flags & RTNH_F_DEAD ? net->ipv6.fib6_null_entry : rt;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538}
539
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800540#ifdef CONFIG_IPV6_ROUTER_PREF
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200541struct __rt6_probe_work {
542 struct work_struct work;
543 struct in6_addr target;
544 struct net_device *dev;
545};
546
547static void rt6_probe_deferred(struct work_struct *w)
548{
549 struct in6_addr mcaddr;
550 struct __rt6_probe_work *work =
551 container_of(w, struct __rt6_probe_work, work);
552
553 addrconf_addr_solict_mult(&work->target, &mcaddr);
Erik Nordmarkadc176c2016-12-02 14:00:08 -0800554 ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200555 dev_put(work->dev);
Michael Büsch662f5532015-02-08 10:14:07 +0100556 kfree(work);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200557}
558
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800559static void rt6_probe(struct rt6_info *rt)
560{
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700561 struct __rt6_probe_work *work;
David Ahern5e670d82018-04-17 17:33:14 -0700562 const struct in6_addr *nh_gw;
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000563 struct neighbour *neigh;
David Ahern5e670d82018-04-17 17:33:14 -0700564 struct net_device *dev;
565
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800566 /*
567 * Okay, this does not seem to be appropriate
568 * for now, however, we need to check if it
569 * is really so; aka Router Reachability Probing.
570 *
571 * Router Reachability Probe MUST be rate-limited
572 * to no more than one per minute.
573 */
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000574 if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
Amerigo Wangfdd66812012-09-10 02:48:44 +0000575 return;
David Ahern5e670d82018-04-17 17:33:14 -0700576
577 nh_gw = &rt->fib6_nh.nh_gw;
578 dev = rt->fib6_nh.nh_dev;
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000579 rcu_read_lock_bh();
David Ahern5e670d82018-04-17 17:33:14 -0700580 neigh = __ipv6_neigh_lookup_noref(dev, nh_gw);
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000581 if (neigh) {
Martin KaFai Lau8d6c31b2015-07-24 09:57:43 -0700582 if (neigh->nud_state & NUD_VALID)
583 goto out;
584
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700585 work = NULL;
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000586 write_lock(&neigh->lock);
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700587 if (!(neigh->nud_state & NUD_VALID) &&
588 time_after(jiffies,
589 neigh->updated +
590 rt->rt6i_idev->cnf.rtr_probe_interval)) {
591 work = kmalloc(sizeof(*work), GFP_ATOMIC);
592 if (work)
593 __neigh_set_probe_once(neigh);
Hannes Frederic Sowac2f17e82013-10-21 06:17:15 +0200594 }
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000595 write_unlock(&neigh->lock);
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700596 } else {
597 work = kmalloc(sizeof(*work), GFP_ATOMIC);
Eric Dumazetf2c31e32011-07-29 19:00:53 +0000598 }
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700599
600 if (work) {
601 INIT_WORK(&work->work, rt6_probe_deferred);
David Ahern5e670d82018-04-17 17:33:14 -0700602 work->target = *nh_gw;
603 dev_hold(dev);
604 work->dev = dev;
Martin KaFai Lau990edb42015-07-24 09:57:42 -0700605 schedule_work(&work->work);
606 }
607
Martin KaFai Lau8d6c31b2015-07-24 09:57:43 -0700608out:
YOSHIFUJI Hideaki / 吉藤英明2152cae2013-01-17 12:53:43 +0000609 rcu_read_unlock_bh();
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800610}
611#else
612static inline void rt6_probe(struct rt6_info *rt)
613{
YOSHIFUJI Hideaki27097252006-03-20 17:05:13 -0800614}
615#endif
616
Linus Torvalds1da177e2005-04-16 15:20:36 -0700617/*
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800618 * Default Router Selection (RFC 2461 6.3.6)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700619 */
Dave Jonesb6f99a22007-03-22 12:27:49 -0700620static inline int rt6_check_dev(struct rt6_info *rt, int oif)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621{
David Ahern5e670d82018-04-17 17:33:14 -0700622 const struct net_device *dev = rt->fib6_nh.nh_dev;
623
David S. Miller161980f2007-04-06 11:42:27 -0700624 if (!oif || dev->ifindex == oif)
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800625 return 2;
David S. Miller161980f2007-04-06 11:42:27 -0700626 if ((dev->flags & IFF_LOOPBACK) &&
627 rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
628 return 1;
629 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630}
631
/*
 * Map the neighbour-cache state of rt's gateway to an rt6_nud_state.
 * Routes with no gateway (or RTF_NONEXTHOP) trivially succeed.  With
 * CONFIG_IPV6_ROUTER_PREF a NUD_FAILED neighbour yields
 * RT6_NUD_FAIL_PROBE; a missing neighbour entry only demotes the route
 * (RT6_NUD_FAIL_DO_RR) when router preference support is disabled.
 */
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;
	struct neighbour *neigh;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	/* noref lookup is only valid under rcu_read_lock_bh() */
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->fib6_nh.nh_dev,
					  &rt->fib6_nh.nh_gw);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700663
/*
 * Combined route score: interface match from rt6_check_dev() in the low
 * bits, router preference (if configured) shifted above it.  Returns a
 * negative RT6_NUD_FAIL_* value when the route must be skipped (strict
 * interface mismatch, or neighbour check failure under
 * RT6_LOOKUP_F_REACHABLE).
 */
static int rt6_score_route(struct rt6_info *rt, int oif,
			   int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	/* preference bits rank above the device-match score */
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700682
/*
 * Consider @rt as a candidate: if it is usable (not dead, not linkdown
 * unless allowed, not expired) and scores higher than *mpri, it becomes
 * the new match.  RT6_NUD_FAIL_DO_RR demotes the score to 0 and records
 * that round-robin rotation is wanted via *do_rr.  Returns the current
 * best match (possibly unchanged).
 */
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;

	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		goto out;

	if (idev->cnf.ignore_routes_with_linkdown &&
	    rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (fib6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}
722
/*
 * Scan the routes of metric @metric for the best candidate, starting at
 * @rr_head and wrapping around via @leaf so round-robin state is
 * honoured.  The first route seen with a different metric is remembered
 * in 'cont' and that tail is only scanned if the equal-metric routes
 * produced no match.  Caller must hold rcu_read_lock().
 */
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *leaf,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct rt6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	/* first pass: from the round-robin head to the end of the list */
	for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	/* second pass: wrap around from the leaf up to rr_head */
	for (rt = leaf; rt && rt != rr_head;
	     rt = rcu_dereference(rt->rt6_next)) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	/* fall back to the higher-metric tail only if nothing matched */
	for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}
YOSHIFUJI Hideaki554cfb72006-03-20 17:00:26 -0800761
/*
 * Default Router Selection for node @fn: pick the best route via
 * find_rr_leaf() and, when round-robin was requested, advance
 * fn->rr_ptr under tb6_lock.  Returns fib6_null_entry when the node has
 * no usable routes.  Caller must hold rcu_read_lock().
 */
static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
				   int oif, int strict)
{
	struct rt6_info *leaf = rcu_dereference(fn->leaf);
	struct rt6_info *match, *rt0;
	bool do_rr = false;
	int key_plen;

	if (!leaf || leaf == net->ipv6.fib6_null_entry)
		return net->ipv6.fib6_null_entry;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not points to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->rt6i_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->rt6i_src.plen)
		key_plen = rt0->rt6i_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		return net->ipv6.fib6_null_entry;

	match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct rt6_info *next = rcu_dereference(rt0->rt6_next);

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->rt6i_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->rt6i_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
		}
	}

	return match ? match : net->ipv6.fib6_null_entry;
}
811
Martin KaFai Lau8b9df262015-05-22 20:55:59 -0700812static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
813{
814 return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
815}
816
#ifdef CONFIG_IPV6_ROUTE_INFO
/*
 * Process a received Route Information option (carried in router
 * advertisements; presumably per RFC 4191 — confirm against caller):
 * validate length/prefix_len/preference, then add, refresh, or (when
 * the advertised lifetime is zero) delete the corresponding route via
 * gateway @gwaddr.  Returns 0 on success or -EINVAL on malformed input.
 */
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct rt6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	/* a zero-length prefix designates the default router */
	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(net, gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	/* zero lifetime withdraws an existing route */
	if (rt && !lifetime) {
		ip6_del_rt(net, rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			fib6_clean_expires(rt);
		else
			fib6_set_expires(rt, jiffies + HZ * lifetime);

		ip6_rt_put(rt);
	}
	return 0;
}
#endif
892
/*
 *	Misc support functions
 */

/* Resolve the dst device for a copy of @rt; called with rcu_lock held.
 * For non-local routes the nexthop device is returned unchanged.
 */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
	struct net_device *dev = rt->fib6_nh.nh_dev;

	if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&rt->rt6i_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}
919
/* Per-RTN_* route-type error codes: 0 for deliverable types, a negative
 * errno for reject-style types (used as the dst error below).
 */
static const int fib6_prop[RTN_MAX + 1] = {
	[RTN_UNSPEC]	= 0,
	[RTN_UNICAST]	= 0,
	[RTN_LOCAL]	= 0,
	[RTN_BROADCAST]	= 0,
	[RTN_ANYCAST]	= 0,
	[RTN_MULTICAST]	= 0,
	[RTN_BLACKHOLE]	= -EINVAL,
	[RTN_UNREACHABLE] = -EHOSTUNREACH,
	[RTN_PROHIBIT]	= -EACCES,
	[RTN_THROW]	= -EAGAIN,
	[RTN_NAT]	= -EINVAL,
	[RTN_XRESOLVE]	= -EINVAL,
};
934
/* Translate a fib6 route type to its errno via the fib6_prop table. */
static int ip6_rt_type_to_error(u8 fib6_type)
{
	return fib6_prop[fib6_type];
}
939
/* Set dst error/input/output handlers for reject-type routes
 * (blackhole drops silently; prohibit/throw/unreachable discard with
 * the matching error set on the dst).
 */
static void ip6_rt_init_dst_reject(struct rt6_info *rt, struct rt6_info *ort)
{
	rt->dst.error = ip6_rt_type_to_error(ort->fib6_type);

	switch (ort->fib6_type) {
	case RTN_BLACKHOLE:
		rt->dst.output = dst_discard_out;
		rt->dst.input = dst_discard;
		break;
	case RTN_PROHIBIT:
		rt->dst.output = ip6_pkt_prohibit_out;
		rt->dst.input = ip6_pkt_prohibit;
		break;
	case RTN_THROW:
	case RTN_UNREACHABLE:
	default:
		rt->dst.output = ip6_pkt_discard_out;
		rt->dst.input = ip6_pkt_discard;
		break;
	}
}
961
/* Initialize dst input/output handlers of @rt from fib entry @ort:
 * reject routes get discard handlers, local routes deliver via
 * ip6_input, multicast via ip6_mc_input, everything else forwards.
 * Lightweight-tunnel state is propagated when present.
 */
static void ip6_rt_init_dst(struct rt6_info *rt, struct rt6_info *ort)
{
	if (ort->rt6i_flags & RTF_REJECT) {
		ip6_rt_init_dst_reject(rt, ort);
		return;
	}

	rt->dst.error = 0;
	rt->dst.output = ip6_output;

	if (ort->fib6_type == RTN_LOCAL) {
		rt->dst.flags |= DST_HOST;
		rt->dst.input = ip6_input;
	} else if (ipv6_addr_type(&ort->rt6i_dst.addr) & IPV6_ADDR_MULTICAST) {
		rt->dst.input = ip6_mc_input;
	} else {
		rt->dst.input = ip6_forward;
	}

	if (ort->fib6_nh.nh_lwtstate) {
		rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
		lwtunnel_set_redirect(&rt->dst);
	}

	rt->dst.lastuse = jiffies;
}
988
David Ahernae90d862018-04-17 17:33:12 -0700989static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
990{
991 BUG_ON(from->from);
992
993 rt->rt6i_flags &= ~RTF_EXPIRES;
994 dst_hold(&from->dst);
995 rt->from = from;
David Ahernd4ead6b2018-04-17 17:33:16 -0700996 dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
997 if (from->fib6_metrics != &dst_default_metrics) {
998 rt->dst._metrics |= DST_METRICS_REFCOUNTED;
999 refcount_inc(&from->fib6_metrics->refcnt);
1000 }
David Ahernae90d862018-04-17 17:33:12 -07001001}
1002
/* Copy routing state from fib entry @ort into freshly allocated @rt:
 * dst handlers, addresses, idev (with a hold), gateway, flags, metric,
 * prefsrc, table and lwtunnel state; rt6_set_from() ties rt to ort.
 */
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
{
	ip6_rt_init_dst(rt, ort);

	rt->rt6i_dst = ort->rt6i_dst;
	rt->rt6i_idev = ort->rt6i_idev;
	if (rt->rt6i_idev)
		in6_dev_hold(rt->rt6i_idev);
	rt->rt6i_gateway = ort->fib6_nh.nh_gw;
	rt->rt6i_flags = ort->rt6i_flags;
	rt6_set_from(rt, ort);
	rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
	rt->rt6i_table = ort->rt6i_table;
	rt->dst.lwtstate = lwtstate_get(ort->fib6_nh.nh_lwtstate);
}
1022
/* Walk back up the fib trie from @fn, descending into a parent's
 * subtree (keyed by @saddr) where one exists, until a node carrying
 * route info (RTN_RTINFO) is found; NULL once the root is reached.
 */
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}
Thomas Grafc71099a2006-08-04 23:20:06 -07001040
/* Try to take a dst reference on *prt.  Returns true on success; on
 * failure (refcount already zero) *prt is replaced by the held
 * ip6_null_entry when @null_fallback is set, otherwise by NULL.
 */
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
			  bool null_fallback)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (null_fallback) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}
1057
/* Policy-routing table lookup: find the best-matching route in @table
 * for @fl6 under RCU, backtracking up the trie while only
 * fib6_null_entry is found, preferring a cached exception route, and
 * taking a (noref-style) reference before returning.  Falls back to
 * the held fib6_null_entry when the ref cannot be taken.
 */
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct rt6_info *rt, *rt_cache;
	struct fib6_node *fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	rt = rcu_dereference(fn->leaf);
	if (!rt) {
		rt = net->ipv6.fib6_null_entry;
	} else {
		rt = rt6_device_match(net, rt, &fl6->saddr,
				      fl6->flowi6_oif, flags);
		if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
			rt = rt6_multipath_select(net, rt, fl6, fl6->flowi6_oif,
						  skb, flags);
	}
	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
	/* Search through exception table */
	rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
	if (rt_cache)
		rt = rt_cache;

	if (ip6_hold_safe(net, &rt, true))
		dst_use_noref(&rt->dst, jiffies);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);

	return rt;

}
1103
/* Public wrapper: run a policy-rule lookup using the plain
 * (non-input/output) lookup function above.
 */
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
1110
/* Convenience lookup by destination/source address and interface.
 * Builds a flowi6 and returns the resulting rt6_info with a reference
 * held, or NULL if the lookup produced an error dst.
 */
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
1136
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes new route entry, the addition fails by any reason the
 * route is released.
 * Caller must hold dst before calling it.
 */

/* Insert @rt into its fib6 table under tb6_lock; returns fib6_add()'s
 * result (0 on success, negative errno otherwise).
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}
1156
/* Insert @rt into the fib, taking the dst reference the tree will own. */
int ip6_ins_rt(struct net *net, struct rt6_info *rt)
{
	struct nl_info info = {	.nl_net = net, };

	/* Hold dst to account for the reference from the fib6 tree */
	dst_hold(&rt->dst);
	return __ip6_ins_rt(rt, &info, NULL);
}
1165
/* Allocate an RTF_CACHE clone of @ort for destination @daddr (host
 * /128 entry).  If ort is itself a cache/pcpu copy, its origin is
 * cloned instead.  Returns NULL on allocation failure.
 */
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 * Clone the route.
	 */

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = ort->from;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(ort);
	rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
	rcu_read_unlock();
	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
/* Allocate a per-cpu (RTF_PCPU) copy of @rt; NULL on allocation
 * failure.
 */
static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(rt);
	pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags);
	rcu_read_unlock();
	if (!pcpu_rt)
		return NULL;
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}
1225
/* It should be called with rcu_read_lock() acquired */
/* Return this cpu's cached copy of @rt with a reference held, or NULL
 * if none exists yet (or the reference could not be taken).
 */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt)
		ip6_hold_safe(NULL, &pcpu_rt, false);

	return pcpu_rt;
}
1239
/* Create and publish this cpu's copy of @rt.  On allocation failure
 * the held ip6_null_entry is returned instead.  The cmpxchg() must see
 * a NULL slot — callers only get here when no pcpu copy existed.
 */
static struct rt6_info *rt6_make_pcpu_route(struct net *net,
					    struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(rt->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	return pcpu_rt;
}
1258
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	rt6_ex->rt6i->rt6i_node = NULL;
	hlist_del_rcu(&rt6_ex->hlist);
	rt6_release(rt6_ex->rt6i);
	/* entry itself is freed after a grace period; readers may still
	 * be walking the hlist under RCU
	 */
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
	net->ipv6.rt6_stats->fib_rt_cache--;
}
1283
/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	/* oldest == smallest timestamp (time_before handles wrap) */
	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}
1300
/* Hash a (dst, src) address pair into a bucket index; the per-boot
 * random seed makes the distribution unpredictable to remote peers.
 */
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
1316
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
1349
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	/* RCU-safe traversal pairs with hlist_del_rcu() in
	 * rt6_remove_exception()
	 */
	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
1384
/* Effective MTU for fib entry @rt: the stored path MTU if set, else
 * the device's IPv6 MTU, clamped to IP6_MAX_MTU and reduced by any
 * lightweight-tunnel encapsulation headroom.
 */
static unsigned int fib6_mtu(const struct rt6_info *rt)
{
	unsigned int mtu;

	mtu = rt->fib6_pmtu ? : rt->rt6i_idev->cnf.mtu6;
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(rt->fib6_nh.nh_lwtstate, mtu);
}
1394
/* Insert the RTF_CACHE clone @nrt into the exception table of the
 * FIB route @ort it was derived from.  Takes a reference on @nrt on
 * success.  Returns 0 on success, -EINVAL if @ort is being flushed
 * or @nrt's MTU is not below @ort's, -ENOMEM on allocation failure.
 */
static int rt6_insert_exception(struct rt6_info *nrt,
				struct rt6_info *ort)
{
	struct net *net = dev_net(nrt->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err = 0;

	/* ort can't be a cache or pcpu route */
	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = ort->from;
	WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));

	spin_lock_bh(&rt6_exception_lock);

	/* rt6_flush_exceptions() set this flag; do not recreate the
	 * bucket list for a route that is going away.
	 */
	if (ort->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		/* First exception on this route: allocate the bucket
		 * array lazily, under the spinlock, hence GFP_ATOMIC.
		 */
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates ort is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (ort->rt6i_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif

	/* Update rt6i_prefsrc as it could be changed
	 * in rt6_remove_prefsrc()
	 */
	nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
	/* rt6_mtu_change() might lower mtu on ort.
	 * Only insert this exception route if its mtu
	 * is less than ort's mtu value.
	 */
	if (dst_metric_raw(&nrt->dst, RTAX_MTU) >= fib6_mtu(ort)) {
		err = -EINVAL;
		goto out;
	}

	/* Replace any existing exception for the same (daddr, saddr). */
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	atomic_inc(&nrt->rt6i_ref);
	nrt->rt6i_node = ort->rt6i_node;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&ort->rt6i_table->tb6_lock);
		fib6_update_sernum(net, ort);
		spin_unlock_bh(&ort->rt6i_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}
1486
1487void rt6_flush_exceptions(struct rt6_info *rt)
1488{
1489 struct rt6_exception_bucket *bucket;
1490 struct rt6_exception *rt6_ex;
1491 struct hlist_node *tmp;
1492 int i;
1493
1494 spin_lock_bh(&rt6_exception_lock);
1495 /* Prevent rt6_insert_exception() to recreate the bucket list */
1496 rt->exception_bucket_flushed = 1;
1497
1498 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1499 lockdep_is_held(&rt6_exception_lock));
1500 if (!bucket)
1501 goto out;
1502
1503 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1504 hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
1505 rt6_remove_exception(bucket, rt6_ex);
1506 WARN_ON_ONCE(bucket->depth);
1507 bucket++;
1508 }
1509
1510out:
1511 spin_unlock_bh(&rt6_exception_lock);
1512}
1513
1514/* Find cached rt in the hash table inside passed in rt
1515 * Caller has to hold rcu_read_lock()
1516 */
1517static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
1518 struct in6_addr *daddr,
1519 struct in6_addr *saddr)
1520{
1521 struct rt6_exception_bucket *bucket;
1522 struct in6_addr *src_key = NULL;
1523 struct rt6_exception *rt6_ex;
1524 struct rt6_info *res = NULL;
1525
1526 bucket = rcu_dereference(rt->rt6i_exception_bucket);
1527
1528#ifdef CONFIG_IPV6_SUBTREES
1529 /* rt6i_src.plen != 0 indicates rt is in subtree
1530 * and exception table is indexed by a hash of
1531 * both rt6i_dst and rt6i_src.
1532 * Otherwise, the exception table is indexed by
1533 * a hash of only rt6i_dst.
1534 */
1535 if (rt->rt6i_src.plen)
1536 src_key = saddr;
1537#endif
1538 rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);
1539
1540 if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
1541 res = rt6_ex->rt6i;
1542
1543 return res;
1544}
1545
1546/* Remove the passed in cached rt from the hash table that contains it */
/* Remove the RTF_CACHE route @rt from the exception table of the
 * route it was cloned from (rt->from).  Returns 0 on success,
 * -EINVAL when @rt is not a cache route, -ENOENT when no matching
 * exception entry exists.
 */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_info *from = rt->from;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err;

	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	/* Cheap lockless check before taking the spinlock. */
	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->rt6i_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}
1588
1589/* Find rt6_ex which contains the passed in rt cache and
1590 * refresh its stamp
1591 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_info *from = rt->from;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;

	/* Only RTF_CACHE clones live in an exception table. */
	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return;

	rcu_read_lock();
	bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->rt6i_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket,
					  &rt->rt6i_dst.addr,
					  src_key);
	/* Refresh the last-use stamp so GC aging in
	 * rt6_age_examine_exception() keeps the entry alive.
	 */
	if (rt6_ex)
		rt6_ex->stamp = jiffies;

	rcu_read_unlock();
}
1624
Wei Wang60006a42017-10-06 12:05:58 -07001625static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
1626{
1627 struct rt6_exception_bucket *bucket;
1628 struct rt6_exception *rt6_ex;
1629 int i;
1630
1631 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1632 lockdep_is_held(&rt6_exception_lock));
1633
1634 if (bucket) {
1635 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1636 hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
1637 rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
1638 }
1639 bucket++;
1640 }
1641 }
1642}
1643
Stefano Brivioe9fa1492018-03-06 11:10:19 +01001644static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
1645 struct rt6_info *rt, int mtu)
1646{
1647 /* If the new MTU is lower than the route PMTU, this new MTU will be the
1648 * lowest MTU in the path: always allow updating the route PMTU to
1649 * reflect PMTU decreases.
1650 *
1651 * If the new MTU is higher, and the route PMTU is equal to the local
1652 * MTU, this means the old MTU is the lowest in the path, so allow
1653 * updating it: if other nodes now have lower MTUs, PMTU discovery will
1654 * handle this.
1655 */
1656
1657 if (dst_mtu(&rt->dst) >= mtu)
1658 return true;
1659
1660 if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
1661 return true;
1662
1663 return false;
1664}
1665
/* Propagate a device MTU change to every exception route cached under
 * @rt, subject to the PMTU rules in rt6_mtu_change_route_allowed().
 * Caller must hold rt6_exception_lock.
 */
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       struct rt6_info *rt, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->from have already
			 * been updated.
			 */
			if (dst_metric_raw(&entry->dst, RTAX_MTU) &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				dst_metric_set(&entry->dst, RTAX_MTU, mtu);
		}
		bucket++;
	}
}
1694
Wei Wangb16cb452017-10-06 12:06:00 -07001695#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
1696
1697static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
1698 struct in6_addr *gateway)
1699{
1700 struct rt6_exception_bucket *bucket;
1701 struct rt6_exception *rt6_ex;
1702 struct hlist_node *tmp;
1703 int i;
1704
1705 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1706 return;
1707
1708 spin_lock_bh(&rt6_exception_lock);
1709 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1710 lockdep_is_held(&rt6_exception_lock));
1711
1712 if (bucket) {
1713 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1714 hlist_for_each_entry_safe(rt6_ex, tmp,
1715 &bucket->chain, hlist) {
1716 struct rt6_info *entry = rt6_ex->rt6i;
1717
1718 if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
1719 RTF_CACHE_GATEWAY &&
1720 ipv6_addr_equal(gateway,
1721 &entry->rt6i_gateway)) {
1722 rt6_remove_exception(bucket, rt6_ex);
1723 }
1724 }
1725 bucket++;
1726 }
1727 }
1728
1729 spin_unlock_bh(&rt6_exception_lock);
1730}
1731
/* Decide the fate of one exception entry during GC: remove it when it
 * has aged out, expired, or points at a gateway that is no longer a
 * router; otherwise bump gc_args->more so GC runs again.
 * Caller holds rt6_exception_lock and rcu_read_lock_bh (the latter is
 * needed for the _noref neighbour lookup below).
 */
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		/* No neighbour entry, or one without NTF_ROUTER: the
		 * gateway is not (known to be) a router any more.
		 */
		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}
1775
1776void rt6_age_exceptions(struct rt6_info *rt,
1777 struct fib6_gc_args *gc_args,
1778 unsigned long now)
1779{
1780 struct rt6_exception_bucket *bucket;
1781 struct rt6_exception *rt6_ex;
1782 struct hlist_node *tmp;
1783 int i;
1784
1785 if (!rcu_access_pointer(rt->rt6i_exception_bucket))
1786 return;
1787
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001788 rcu_read_lock_bh();
1789 spin_lock(&rt6_exception_lock);
Wei Wangc757faa2017-10-06 12:06:01 -07001790 bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
1791 lockdep_is_held(&rt6_exception_lock));
1792
1793 if (bucket) {
1794 for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
1795 hlist_for_each_entry_safe(rt6_ex, tmp,
1796 &bucket->chain, hlist) {
1797 rt6_age_examine_exception(bucket, rt6_ex,
1798 gc_args, now);
1799 }
1800 bucket++;
1801 }
1802 }
Eric Dumazet1bfa26f2018-03-23 07:56:58 -07001803 spin_unlock(&rt6_exception_lock);
1804 rcu_read_unlock_bh();
Wei Wangc757faa2017-10-06 12:06:01 -07001805}
1806
/* Core policy-routing lookup for one FIB table.  Finds the best route
 * for @fl6 in @table and returns a dst the caller owns a reference on:
 * either a cached RTF_CACHE clone, a per-cpu copy of the FIB route, an
 * uncached clone (FLOWI_FLAG_KNOWN_NH case), or ip6_null_entry when
 * nothing matches.  Never returns NULL.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt, *rt_cache;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	/* Hosts (forwarding disabled) must prefer reachable routers,
	 * cf. the default-router selection notes at the top of file.
	 */
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(net, fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(net, rt, fl6, oif, skb, strict);
	if (rt == net->ipv6.fib6_null_entry) {
		/* Backtrack towards less-specific nodes; once exhausted,
		 * retry the whole lookup without the REACHABLE strictness.
		 */
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	/* Search through exception table */
	rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
	if (rt_cache)
		rt = rt_cache;

	if (rt == net->ipv6.fib6_null_entry) {
		rt = net->ipv6.ip6_null_entry;
		rcu_read_unlock();
		dst_hold(&rt->dst);
		trace_fib6_table_lookup(net, rt, table, fl6);
		return rt;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		/* ip6_hold_safe() falls back to ip6_null_entry (with a
		 * fresh hold) when rt's refcount already hit zero.
		 */
		if (ip6_hold_safe(net, &rt, true))
			dst_use_noref(&rt->dst, jiffies);

		rcu_read_unlock();
		trace_fib6_table_lookup(net, rt, table, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */

		struct rt6_info *uncached_rt;

		if (ip6_hold_safe(net, &rt, true)) {
			dst_use_noref(&rt->dst, jiffies);
		} else {
			rcu_read_unlock();
			uncached_rt = rt;
			goto uncached_rt_out;
		}
		rcu_read_unlock();

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

uncached_rt_out:
		trace_fib6_table_lookup(net, uncached_rt, table, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		dst_use_noref(&rt->dst, jiffies);
		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (!pcpu_rt) {
			/* atomic_inc_not_zero() is needed when using rcu */
			if (atomic_inc_not_zero(&rt->rt6i_ref)) {
				/* No dst_hold() on rt is needed because grabbing
				 * rt->rt6i_ref makes sure rt can't be released.
				 */
				pcpu_rt = rt6_make_pcpu_route(net, rt);
				rt6_release(rt);
			} else {
				/* rt is already removed from tree */
				pcpu_rt = net->ipv6.ip6_null_entry;
				dst_hold(&pcpu_rt->dst);
			}
		}
		local_bh_enable();
		rcu_read_unlock();
		trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
Thomas Grafc71099a2006-08-04 23:20:06 -07001929
/* fib6_rule_lookup() callback for the input path: look up using the
 * incoming interface (flowi6_iif) as the oif argument.
 */
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}
1938
/* Route lookup for received packets on @dev.  Forces strict interface
 * matching for link-local/multicast destinations, except on PIM
 * register pseudo-devices.
 */
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00001951
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001952static void ip6_multipath_l3_keys(const struct sk_buff *skb,
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001953 struct flow_keys *keys,
1954 struct flow_keys *flkeys)
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001955{
1956 const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
1957 const struct ipv6hdr *key_iph = outer_iph;
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001958 struct flow_keys *_flkeys = flkeys;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001959 const struct ipv6hdr *inner_iph;
1960 const struct icmp6hdr *icmph;
1961 struct ipv6hdr _inner_iph;
1962
1963 if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
1964 goto out;
1965
1966 icmph = icmp6_hdr(skb);
1967 if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
1968 icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
1969 icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
1970 icmph->icmp6_type != ICMPV6_PARAMPROB)
1971 goto out;
1972
1973 inner_iph = skb_header_pointer(skb,
1974 skb_transport_offset(skb) + sizeof(*icmph),
1975 sizeof(_inner_iph), &_inner_iph);
1976 if (!inner_iph)
1977 goto out;
1978
1979 key_iph = inner_iph;
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001980 _flkeys = NULL;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001981out:
Roopa Prabhu5e5d6fe2018-02-28 22:43:22 -05001982 if (_flkeys) {
1983 keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
1984 keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
1985 keys->tags.flow_label = _flkeys->tags.flow_label;
1986 keys->basic.ip_proto = _flkeys->basic.ip_proto;
1987 } else {
1988 keys->addrs.v6addrs.src = key_iph->saddr;
1989 keys->addrs.v6addrs.dst = key_iph->daddr;
1990 keys->tags.flow_label = ip6_flowinfo(key_iph);
1991 keys->basic.ip_proto = key_iph->nexthdr;
1992 }
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001993}
1994
1995/* if skb is set it will be used and fl6 can be NULL */
David Ahernb4bac172018-03-02 08:32:18 -08001996u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
1997 const struct sk_buff *skb, struct flow_keys *flkeys)
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02001998{
1999 struct flow_keys hash_keys;
David Ahern9a2a5372018-03-02 08:32:15 -08002000 u32 mhash;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002001
David S. Millerbbfa0472018-03-12 11:09:33 -04002002 switch (ip6_multipath_hash_policy(net)) {
David Ahernb4bac172018-03-02 08:32:18 -08002003 case 0:
2004 memset(&hash_keys, 0, sizeof(hash_keys));
2005 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2006 if (skb) {
2007 ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
2008 } else {
2009 hash_keys.addrs.v6addrs.src = fl6->saddr;
2010 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2011 hash_keys.tags.flow_label = (__force u32)fl6->flowlabel;
2012 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2013 }
2014 break;
2015 case 1:
2016 if (skb) {
2017 unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
2018 struct flow_keys keys;
2019
2020 /* short-circuit if we already have L4 hash present */
2021 if (skb->l4_hash)
2022 return skb_get_hash_raw(skb) >> 1;
2023
2024 memset(&hash_keys, 0, sizeof(hash_keys));
2025
2026 if (!flkeys) {
2027 skb_flow_dissect_flow_keys(skb, &keys, flag);
2028 flkeys = &keys;
2029 }
2030 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2031 hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
2032 hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
2033 hash_keys.ports.src = flkeys->ports.src;
2034 hash_keys.ports.dst = flkeys->ports.dst;
2035 hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
2036 } else {
2037 memset(&hash_keys, 0, sizeof(hash_keys));
2038 hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
2039 hash_keys.addrs.v6addrs.src = fl6->saddr;
2040 hash_keys.addrs.v6addrs.dst = fl6->daddr;
2041 hash_keys.ports.src = fl6->fl6_sport;
2042 hash_keys.ports.dst = fl6->fl6_dport;
2043 hash_keys.basic.ip_proto = fl6->flowi6_proto;
2044 }
2045 break;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002046 }
David Ahern9a2a5372018-03-02 08:32:15 -08002047 mhash = flow_hash_from_keys(&hash_keys);
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002048
David Ahern9a2a5372018-03-02 08:32:15 -08002049 return mhash >> 1;
Jakub Sitnicki23aebda2017-08-23 09:58:29 +02002050}
2051
/* Entry point for routing a received skb: build a flowi6 from the
 * IPv6 header (plus tunnel key and early-dissected flow keys when
 * available) and attach the looked-up dst to the skb.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	/* Use the tunnel id for the lookup on RX-side tunnel metadata. */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	/* Pre-compute the multipath hash for ICMPv6 so errors follow
	 * the path of the flow they refer to (see ip6_multipath_l3_keys).
	 */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set(skb,
		    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}
2081
/* fib6_rule_lookup() callback for the output path: look up using the
 * requested outgoing interface (flowi6_oif).
 */
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}
2090
/* Route lookup for locally generated traffic.  Returns a held dst;
 * link-local/multicast destinations may be resolved by an L3 master
 * device (VRF) short-cut before the regular FIB rules run.
 */
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	/* Strict interface match when the socket is bound to a device,
	 * the destination needs it, or an oif was given without saddr.
	 */
	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
/* Clone @dst_orig into a blackhole dst (input/output both discard),
 * preserving its metrics and addressing so callers keep a valid but
 * non-forwarding route.  Always releases @dst_orig; returns the new
 * dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* A blackhole clone must not be treated as a pcpu copy. */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}
David S. Miller14e50e52007-05-24 18:17:54 -07002153
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154/*
2155 * Destination cache support functions
2156 */
2157
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002158static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
2159{
Steffen Klassert36143642017-08-25 09:05:42 +02002160 u32 rt_cookie = 0;
Wei Wangc5cff852017-08-21 09:47:10 -07002161
2162 if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002163 return NULL;
2164
2165 if (rt6_check_expired(rt))
2166 return NULL;
2167
2168 return &rt->dst;
2169}
2170
2171static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
2172{
Martin KaFai Lau5973fb12015-11-11 11:51:07 -08002173 if (!__rt6_check_expired(rt) &&
2174 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
David Miller3a2232e2017-11-28 15:40:40 -05002175 rt6_check(rt->from, cookie))
Martin KaFai Lau3da59bd2015-05-22 20:56:03 -07002176 return &rt->dst;
2177 else
2178 return NULL;
2179}
2180
/* ->check dst_ops hook: revalidate @dst against @cookie (the value
 * recorded when the dst was cached by its user).  Returns @dst if still
 * valid, or NULL to force the caller into a fresh route lookup.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	/* Per-cpu clones, and uncached clones that still reference a
	 * parent route (->from), are validated through that parent;
	 * everything else is validated directly.
	 */
	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}
2198
/* ->negative_advice dst_ops hook: an upper layer reported trouble with
 * this route.  Expired cached clones are deleted from the FIB; for
 * non-cached routes the caller's reference is dropped so a fresh lookup
 * will be performed.  Returns the dst the caller should keep, or NULL.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				/* ip6_del_rt() consumes our reference. */
				ip6_del_rt(dev_net(dst->dev), rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
2216
/* ->link_failure dst_ops hook: delivery over this route failed.  Notify
 * the sender with an ICMPv6 address-unreachable error, then either drop
 * the failed cached clone from the FIB or invalidate the FIB node's
 * serial number so cached dsts stop validating against it.
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* dst_hold_safe() guards against a racing free;
			 * ip6_del_rt() consumes the reference it takes.
			 */
			if (dst_hold_safe(&rt->dst))
				ip6_del_rt(dev_net(rt->dst.dev), rt);
		} else {
			struct fib6_node *fn;

			/* For a failed default route, poison the node's
			 * sernum so cached dsts re-validate and a
			 * different router can be chosen.
			 */
			rcu_read_lock();
			fn = rcu_dereference(rt->rt6i_node);
			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
				fn->fn_sernum = -1;
			rcu_read_unlock();
		}
	}
}
2239
/* Record a newly learned path MTU on @rt, mark it as modified, and
 * (re)arm its expiry so the learned MTU is eventually forgotten (after
 * the ip6_rt_mtu_expires sysctl interval).
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	dst_metric_set(&rt->dst, RTAX_MTU, mtu);
	rt->rt6i_flags |= RTF_MODIFIED;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
2248
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002249static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
2250{
2251 return !(rt->rt6i_flags & RTF_CACHE) &&
Wei Wang4e587ea2017-08-25 15:03:10 -07002252 (rt->rt6i_flags & RTF_PCPU ||
2253 rcu_access_pointer(rt->rt6i_node));
Martin KaFai Lau0d3f6d22015-11-11 11:51:06 -08002254}
2255
/* Core path-MTU update.  Clamps @mtu to at least IPV6_MIN_MTU and, if
 * it is smaller than the current path MTU, records it: routes that must
 * not be modified in place get a cached clone inserted into the
 * parent's exception table (see rt6_cache_allowed_for_pmtu()); all
 * other routes are updated directly.  Local routes and routes with a
 * locked MTU metric are left untouched.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	/* Prefer addresses from the triggering packet, then from the
	 * socket; without either, only an in-place update is possible.
	 */
	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6->from, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			/* Insertion can fail (e.g. lost a race); drop the
			 * unused clone immediately in that case.
			 */
			if (rt6_insert_exception(nrt6, rt6->from))
				dst_release_immediate(&nrt6->dst);
		}
	}
}
2299
Martin KaFai Lau45e4fd22015-05-22 20:56:00 -07002300static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
2301 struct sk_buff *skb, u32 mtu)
2302{
2303 __ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
2304}
2305
/* Update the path MTU towards the destination of the IPv6 packet at
 * @skb->data (typically in response to a Packet Too Big indication).
 * @mtu is in network byte order; a zero @mark falls back to the
 * netns reply mark derived from skb->mark.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	/* Rebuild the flow of the original packet so the lookup hits
	 * the same route the sender used.
	 */
	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);
2327
/* Socket variant of ip6_update_pmtu(): update the PMTU using the
 * socket's bound device, mark and uid, then refresh the socket's cached
 * dst if the update made it stale.
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

	/* Nothing more to do if there is no cached dst or it is still
	 * valid per its own ->check().
	 */
	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	/* Only refresh when the socket is not owned by userspace and the
	 * destination is a real IPv6 address (not IPv4-mapped).
	 */
	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
2346
/* Store @dst on @sk.  The destination (and, with subtrees enabled, the
 * source) address is remembered only when it matches the flow, so later
 * checks can tell whether the cached route still applies to the
 * socket's addresses.
 */
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}
2363
/* Handle redirects */

/* Flow key used while processing an ICMPv6 redirect: the ordinary
 * flowi6 plus the address of the router that sent the redirect, so the
 * lookup can verify it came from the route's next hop (RFC 4861).
 */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};
2369
/* Lookup callback for redirect processing: walk the FIB under RCU and
 * find the route whose next hop is the router that sent the redirect
 * (rdfl->gateway), also consulting per-route exception entries whose
 * cached gateway matches.  Returns the chosen route with a reference
 * held (possibly the null entry).
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt, *rt_cache;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
			continue;
		if (fib6_check_expired(rt))
			continue;
		if (rt->rt6i_flags & RTF_REJECT)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->fib6_nh.nh_dev->ifindex)
			continue;
		/* rt_cache's gateway might be different from its 'parent'
		 * in the case of an ip redirect.
		 * So we keep searching in the exception table if the gateway
		 * is different.
		 */
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->fib6_nh.nh_gw)) {
			rt_cache = rt6_find_cached_rt(rt,
						      &fl6->daddr,
						      &fl6->saddr);
			if (rt_cache &&
			    ipv6_addr_equal(&rdfl->gateway,
					    &rt_cache->rt6i_gateway)) {
				rt = rt_cache;
				break;
			}
			continue;
		}
		break;
	}

	if (!rt)
		rt = net->ipv6.fib6_null_entry;
	else if (rt->rt6i_flags & RTF_REJECT) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	/* Nothing matched at this node: retry from a less specific
	 * fib6 node (subtree backtracking).
	 */
	if (rt == net->ipv6.fib6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	ip6_hold_safe(net, &rt, true);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);
	return rt;
};
2445
2446static struct dst_entry *ip6_route_redirect(struct net *net,
David Ahernb75cc8f2018-03-02 08:32:17 -08002447 const struct flowi6 *fl6,
2448 const struct sk_buff *skb,
2449 const struct in6_addr *gateway)
Duan Jiongb55b76b2013-09-04 19:44:21 +08002450{
2451 int flags = RT6_LOOKUP_F_HAS_SADDR;
2452 struct ip6rd_flowi rdfl;
2453
2454 rdfl.fl6 = *fl6;
2455 rdfl.gateway = *gateway;
2456
David Ahernb75cc8f2018-03-02 08:32:17 -08002457 return fib6_rule_lookup(net, &rdfl.fl6, skb,
Duan Jiongb55b76b2013-09-04 19:44:21 +08002458 flags, __ip6_route_redirect);
2459}
2460
/* Process an ICMPv6 redirect for the packet whose IPv6 header sits at
 * @skb->data: rebuild its flow, look up the affected route using the
 * redirect's source as the gateway, and apply the redirect to it.
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);
2482
/* Variant of ip6_redirect() for when the embedded packet's header is
 * not usable: the flow is reconstructed from the redirect message
 * itself (msg->dest as destination, the outer header's daddr as
 * source).
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
2503
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002504void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
2505{
Lorenzo Colittie2d118a2016-11-04 02:23:43 +09002506 ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
2507 sk->sk_uid);
David S. Miller3a5ad2e2012-07-12 00:08:07 -07002508}
2509EXPORT_SYMBOL_GPL(ip6_sk_redirect);
2510
/* ->default_advmss dst_ops hook: derive the default advertised MSS for
 * this route from its MTU minus the fixed IPv6 + TCP header overhead,
 * clamped below by the ip6_rt_min_advmss sysctl and above by the
 * largest non-jumbo payload.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
2532
/* ->mtu dst_ops hook: report the route's MTU.  An explicitly set MTU
 * metric wins; otherwise fall back to the egress device's mtu6 (or
 * IPV6_MIN_MTU when no inet6_dev exists).  The result is capped at
 * IP6_MAX_MTU and reduced by any lightweight-tunnel encap headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	struct inet6_dev *idev;
	unsigned int mtu;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
2555
/* Allocate a standalone (non-FIB) host route towards fl6->daddr via
 * @dev for sending ICMPv6 messages.  The dst is placed on the uncached
 * list (see comment below) and run through xfrm_lookup() before being
 * returned; on failure an ERR_PTR (-ENODEV or -ENOMEM) is returned.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output  = ip6_output;
	rt->rt6i_gateway  = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev     = idev;
	/* Hop limit 0 in the metric means "use the device default". */
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}
2594
/* ->gc dst_ops hook for IPv6 routes.  Runs fib6 garbage collection when
 * the entry count exceeds ip6_rt_max_size or the minimum interval since
 * the last run has elapsed.  ip6_rt_gc_expire grows on each pressured
 * invocation and decays by the elasticity sysctl, making GC
 * progressively more aggressive under sustained pressure.  Returns
 * non-zero while the table is still over its size limit.
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}
2619
/* Allocate and populate rt->fib6_metrics from the netlink-supplied
 * metrics in @cfg, if any were given.  Returns 0 on success (or when
 * there are no metrics to convert), -ENOMEM on allocation failure, or
 * the error from ip_metrics_convert().
 */
static int ip6_convert_metrics(struct net *net, struct rt6_info *rt,
			       struct fib6_config *cfg)
{
	int err = 0;

	if (cfg->fc_mx) {
		rt->fib6_metrics = kzalloc(sizeof(*rt->fib6_metrics),
					   GFP_KERNEL);
		if (unlikely(!rt->fib6_metrics))
			return -ENOMEM;

		refcount_set(&rt->fib6_metrics->refcnt, 1);

		err = ip_metrics_convert(net, cfg->fc_mx, cfg->fc_mx_len,
					 rt->fib6_metrics->metrics);
	}

	return err;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639
/* Look up the nexthop gateway @gw_addr in table @tbid only (no fib
 * rules; link state ignored).  Returns the matching route (callers drop
 * it with ip6_rt_put()), or NULL when the table does not exist or only
 * the null entry matched.
 */
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr,
					    u32 tbid, int flags)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;

	table = fib6_get_table(net, tbid);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}
2671
/* Validate an RTNH_F_ONLINK nexthop: looking up the gateway in the
 * egress device's table (its l3mdev table, or main) must not resolve to
 * a local/anycast/reject route or to a different device.  Returns 0 if
 * acceptable, -EINVAL (with @extack set) otherwise.
 */
static int ip6_route_check_nh_onlink(struct net *net,
				     struct fib6_config *cfg,
				     const struct net_device *dev,
				     struct netlink_ext_ack *extack)
{
	u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
	struct rt6_info *grt;
	int err;

	err = 0;
	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
	if (grt) {
		if (!grt->dst.error &&
		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop has invalid gateway or device mismatch");
			err = -EINVAL;
		}

		ip6_rt_put(grt);
	}

	return err;
}
2698
/* Resolve and validate a (non-onlink) gateway: look it up, first in the
 * route's own table and then via a full rt6_lookup(), and require the
 * result to be a direct (non-gateway) route on the configured device.
 * When no device was given, *_dev and *idev are filled in from the
 * found route with references held.  Returns 0 on success,
 * -EHOSTUNREACH otherwise.
 */
static int ip6_route_check_nh(struct net *net,
			      struct fib6_config *cfg,
			      struct net_device **_dev,
			      struct inet6_dev **idev)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	struct net_device *dev = _dev ? *_dev : NULL;
	struct rt6_info *grt = NULL;
	int err = -EHOSTUNREACH;

	if (cfg->fc_table) {
		int flags = RT6_LOOKUP_F_IFACE;

		grt = ip6_nh_lookup_table(net, cfg, gw_addr,
					  cfg->fc_table, flags);
		if (grt) {
			/* A table hit that is itself behind a gateway, or
			 * on the wrong device, is not usable here.
			 */
			if (grt->rt6i_flags & RTF_GATEWAY ||
			    (dev && dev != grt->dst.dev)) {
				ip6_rt_put(grt);
				grt = NULL;
			}
		}
	}

	if (!grt)
		grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);

	if (!grt)
		goto out;

	if (dev) {
		if (dev != grt->dst.dev) {
			ip6_rt_put(grt);
			goto out;
		}
	} else {
		/* Adopt the found route's device and inet6_dev. */
		*_dev = dev = grt->dst.dev;
		*idev = grt->rt6i_idev;
		dev_hold(dev);
		in6_dev_hold(grt->rt6i_idev);
	}

	if (!(grt->rt6i_flags & RTF_GATEWAY))
		err = 0;

	ip6_rt_put(grt);

out:
	return err;
}
2749
/* Validate the gateway of a route being added: the gateway must not be
 * a local address, must be link-local or a (possibly IPv4-mapped)
 * unicast address passing nexthop validation, and the egress device
 * must exist and not be loopback.  May resolve *_dev and *idev as a
 * side effect.  Returns 0 on success or a negative errno with @extack
 * set.
 */
static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
			   struct net_device **_dev, struct inet6_dev **idev,
			   struct netlink_ext_ack *extack)
{
	const struct in6_addr *gw_addr = &cfg->fc_gateway;
	int gwa_type = ipv6_addr_type(gw_addr);
	/* For link-local gateways the local-address check must consider
	 * the specific device; for global addresses any device counts.
	 */
	bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
	const struct net_device *dev = *_dev;
	bool need_addr_check = !dev;
	int err = -EINVAL;

	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly inhibits using not link-local
		 * addresses as nexthop address.
		 * Otherwise, router will not able to send redirects.
		 * It is very good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC4798-type
		 * addressing
		 */
		if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}

		if (cfg->fc_flags & RTNH_F_ONLINK)
			err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
		else
			err = ip6_route_check_nh(net, cfg, _dev, idev);

		if (err)
			goto out;
	}

	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
	} else if (dev->flags & IFF_LOOPBACK) {
		NL_SET_ERR_MSG(extack,
			       "Egress device can not be loopback device for this route");
		goto out;
	}

	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
	if (need_addr_check &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}
2822
/* Build a new rt6_info from a userspace route request.
 *
 * Validates @cfg (internal-only flags, route type, prefix lengths,
 * nexthop device/gateway), resolves the owning fib6_table, and
 * allocates and fully initializes the route.  On success the route is
 * NOT yet linked into the FIB; the caller (e.g. ip6_route_add()) does
 * the insert.  The returned route holds references on its net_device
 * and inet6_dev.
 *
 * Returns the new route or ERR_PTR(-errno); on failure a user-visible
 * message is usually set in @extack.
 */
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
					      struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

	if (cfg->fc_type > RTN_MAX) {
		NL_SET_ERR_MSG(extack, "Invalid route type");
		goto out;
	}

	/* IPv6 prefixes can be at most 128 bits long */
	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	/* source-based routing needs the subtree support compiled in */
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	/* onlink nexthops are tied to an explicit, running device */
	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			err = -ENODEV;
			goto out;
		}

		if (!(dev->flags & IFF_UP)) {
			NL_SET_ERR_MSG(extack, "Nexthop device is not up");
			err = -ENETDOWN;
			goto out;
		}
	}

	err = -ENOBUFS;
	/* Without NLM_F_CREATE prefer an existing table, but fall back to
	 * creating one (with a warning) for backward compatibility.
	 */
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	/* only addrconf-generated routes count against dst accounting */
	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	err = ip6_convert_metrics(net, rt, cfg);
	if (err < 0)
		goto out;

	if (cfg->fc_flags & RTF_EXPIRES)
		fib6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		fib6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	/* optional lightweight tunnel (encap) state for the nexthop */
	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
		rt->fib6_nh.nh_lwtstate = lwtstate_get(lwtstate);
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;
	rt->fib6_nh.nh_weight = 1;

	rt->fib6_type = cfg->fc_type;

	/* We cannot add true routes via loopback here,
	   they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		/* may swap dev/idev for the device the gateway resolves to */
		err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
		if (err)
			goto out;

		rt->fib6_nh.nh_gw = rt->rt6i_gateway = cfg->fc_gateway;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (idev->cnf.disable_ipv6) {
		NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
		err = -EACCES;
		goto out;
	}

	if (!(dev->flags & IFF_UP)) {
		NL_SET_ERR_MSG(extack, "Nexthop device is not up");
		err = -ENETDOWN;
		goto out;
	}

	/* preferred source address, if given, must be local to @dev */
	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
	    !netif_carrier_ok(dev))
		rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
	rt->fib6_nh.nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
	rt->fib6_nh.nh_dev = rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	/* success: dev/idev references are now owned by @rt */
	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}
3043
David Ahernd4ead6b2018-04-17 17:33:16 -07003044int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003045{
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07003046 struct rt6_info *rt;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003047 int err;
3048
David Ahern333c4302017-05-21 10:12:04 -06003049 rt = ip6_route_info_create(cfg, extack);
David Ahernd4ead6b2018-04-17 17:33:16 -07003050 if (IS_ERR(rt))
3051 return PTR_ERR(rt);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003052
David Ahernd4ead6b2018-04-17 17:33:16 -07003053 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, extack);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07003054
Linus Torvalds1da177e2005-04-16 15:20:36 -07003055 return err;
3056}
3057
Thomas Graf86872cb2006-08-22 00:01:08 -07003058static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059{
David Ahernafb1d4b52018-04-17 17:33:11 -07003060 struct net *net = info->nl_net;
Thomas Grafc71099a2006-08-04 23:20:06 -07003061 struct fib6_table *table;
David Ahernafb1d4b52018-04-17 17:33:11 -07003062 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003063
David Ahern421842e2018-04-17 17:33:18 -07003064 if (rt == net->ipv6.fib6_null_entry) {
Gao feng6825a262012-09-19 19:25:34 +00003065 err = -ENOENT;
3066 goto out;
3067 }
Patrick McHardy6c813a72006-08-06 22:22:47 -07003068
Thomas Grafc71099a2006-08-04 23:20:06 -07003069 table = rt->rt6i_table;
Wei Wang66f5d6c2017-10-06 12:06:10 -07003070 spin_lock_bh(&table->tb6_lock);
Thomas Graf86872cb2006-08-22 00:01:08 -07003071 err = fib6_del(rt, info);
Wei Wang66f5d6c2017-10-06 12:06:10 -07003072 spin_unlock_bh(&table->tb6_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003073
Gao feng6825a262012-09-19 19:25:34 +00003074out:
Amerigo Wang94e187c2012-10-29 00:13:19 +00003075 ip6_rt_put(rt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003076 return err;
3077}
3078
David Ahernafb1d4b52018-04-17 17:33:11 -07003079int ip6_del_rt(struct net *net, struct rt6_info *rt)
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003080{
David Ahernafb1d4b52018-04-17 17:33:11 -07003081 struct nl_info info = { .nl_net = net };
3082
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08003083 return __ip6_del_rt(rt, &info);
Thomas Grafe0a1ad732006-08-22 00:00:21 -07003084}
3085
/* Delete @rt and, when fc_delete_all_nh is set, all of its multipath
 * siblings from the table in one pass.
 *
 * When deleting a whole multipath route, a single RTM_DELROUTE
 * notification covering all hops is pre-built before the deletions and
 * sent afterwards; fib6_del()'s own per-route notifications are
 * suppressed via info->skip_notify.  Consumes the caller's reference
 * on @rt.  Returns 0 or a negative errno from fib6_del().
 */
static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	/* the sentinel null entry is never deletable */
	if (rt == net->ipv6.fib6_null_entry)
		goto out_put;
	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);

	if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
		struct rt6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt, NULL,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				/* fall back to per-route notifications */
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings,
					 rt6i_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	spin_unlock_bh(&table->tb6_lock);
out_put:
	ip6_rt_put(rt);

	/* send the combined notification outside the table lock */
	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}
3137
/* Delete the route described by @cfg.
 *
 * Walks the routes of the matching fib6 node under RCU looking for an
 * entry whose device, gateway, metric and protocol all match the
 * request (unset fields match anything).  With RTF_CACHE set, the
 * cached clone is looked up in the exception table instead of the main
 * list.  A reference is taken (dst_hold_safe) before leaving the RCU
 * section; the actual removal is done by __ip6_del_rt() (single hop,
 * when a gateway was given) or __ip6_del_rt_siblings() (whole
 * multipath route).  Returns 0 on success, -ESRCH if nothing matched.
 */
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
{
	struct rt6_info *rt, *rt_cache;
	struct fib6_table *table;
	struct fib6_node *fn;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}

	rcu_read_lock();

	/* for RTF_CACHE, locate the covering node rather than an exact
	 * match, so the exception table can be searched below
	 */
	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len,
			 !(cfg->fc_flags & RTF_CACHE));

	if (fn) {
		for_each_fib6_node_rt_rcu(fn) {
			if (cfg->fc_flags & RTF_CACHE) {
				rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
							      &cfg->fc_src);
				if (!rt_cache)
					continue;
				rt = rt_cache;
			}
			if (cfg->fc_ifindex &&
			    (!rt->fib6_nh.nh_dev ||
			     rt->fib6_nh.nh_dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->fib6_nh.nh_gw))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
				continue;
			/* route is going away concurrently; nothing to do */
			if (!dst_hold_safe(&rt->dst))
				break;
			rcu_read_unlock();

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	rcu_read_unlock();

	return err;
}
3194
/* Process a received ICMPv6 Redirect aimed at the cached route @dst.
 *
 * Validates the redirect message and its neighbour-discovery options,
 * updates the neighbour cache entry for the new first hop, then clones
 * @dst into an RTF_CACHE exception route whose gateway is the redirect
 * target, and announces the change on the netevent notifier chain.
 * Invalid or unacceptable redirects are silently dropped (with a
 * ratelimited debug message).
 */
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	/* ND option bytes are whatever follows the fixed rd_msg header */
	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	/* dest == target means the destination itself is on-link;
	 * otherwise the target must be a link-local unicast router
	 */
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	/* routers forward instead of honoring redirects; redirects may
	 * also be administratively disabled per device
	 */
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 * The IP source address of the Redirect MUST be the same as the current
	 * first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 * We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_protocol = RTPROT_REDIRECT;
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	/* No need to remove rt from the exception table if rt is
	 * a cached route because rt6_insert_exception() will
	 * take care of it
	 */
	if (rt6_insert_exception(nrt, rt->from)) {
		dst_release_immediate(&nrt->dst);
		goto out;
	}

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
	neigh_release(neigh);
}
3312
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003313#ifdef CONFIG_IPV6_ROUTE_INFO
/* Look up a route previously learned from an RA Route Information
 * option: @prefix/@prefixlen via gateway @gwaddr on @dev.  Matching
 * entries must carry both RTF_ROUTEINFO and RTF_GATEWAY.
 *
 * Returns the route with a reference held (ip6_hold_safe), or NULL if
 * the table or route does not exist.
 */
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	/* VRF-aware: prefer the device's l3mdev table if there is one */
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	for_each_fib6_node_rt_rcu(fn) {
		if (rt->fib6_nh.nh_dev->ifindex != ifindex)
			continue;
		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
			continue;
		ip6_hold_safe(NULL, &rt, false);
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}
3348
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -08003349static struct rt6_info *rt6_add_route_info(struct net *net,
Eric Dumazetb71d1d42011-04-22 04:53:02 +00003350 const struct in6_addr *prefix, int prefixlen,
David Ahern830218c2016-10-24 10:52:35 -07003351 const struct in6_addr *gwaddr,
3352 struct net_device *dev,
Eric Dumazet95c96172012-04-15 05:58:06 +00003353 unsigned int pref)
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003354{
Thomas Graf86872cb2006-08-22 00:01:08 -07003355 struct fib6_config cfg = {
Rami Rosen238fc7e2008-02-09 23:43:11 -08003356 .fc_metric = IP6_RT_PRIO_USER,
David Ahern830218c2016-10-24 10:52:35 -07003357 .fc_ifindex = dev->ifindex,
Thomas Graf86872cb2006-08-22 00:01:08 -07003358 .fc_dst_len = prefixlen,
3359 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3360 RTF_UP | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08003361 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07003362 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003363 .fc_nlinfo.portid = 0,
Daniel Lezcanoefa2cea2008-03-04 13:46:48 -08003364 .fc_nlinfo.nlh = NULL,
3365 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07003366 };
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003367
David Ahern830218c2016-10-24 10:52:35 -07003368 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003369 cfg.fc_dst = *prefix;
3370 cfg.fc_gateway = *gwaddr;
Thomas Graf86872cb2006-08-22 00:01:08 -07003371
YOSHIFUJI Hideakie317da92006-03-20 17:06:42 -08003372 /* We should treat it as a default route if prefix length is 0. */
3373 if (!prefixlen)
Thomas Graf86872cb2006-08-22 00:01:08 -07003374 cfg.fc_flags |= RTF_DEFAULT;
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003375
David Ahern333c4302017-05-21 10:12:04 -06003376 ip6_route_add(&cfg, NULL);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003377
David Ahern830218c2016-10-24 10:52:35 -07003378 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
YOSHIFUJI Hideaki70ceb4f2006-03-20 17:06:24 -08003379}
3380#endif
3381
/* Look up the RA-learned default route (RTF_ADDRCONF|RTF_DEFAULT) via
 * gateway @addr on @dev.  Returns the route with a reference held
 * (ip6_hold_safe), or NULL if the table or route does not exist.
 */
struct rt6_info *rt6_get_dflt_router(struct net *net,
				     const struct in6_addr *addr,
				     struct net_device *dev)
{
	/* VRF-aware: prefer the device's l3mdev table if there is one */
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (dev == rt->fib6_nh.nh_dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
			break;
	}
	/* rt is NULL when the walk fell off the end without a match */
	if (rt)
		ip6_hold_safe(NULL, &rt, false);
	rcu_read_unlock();
	return rt;
}
3406
David Ahernafb1d4b52018-04-17 17:33:11 -07003407struct rt6_info *rt6_add_dflt_router(struct net *net,
3408 const struct in6_addr *gwaddr,
YOSHIFUJI Hideakiebacaaa2006-03-20 17:04:53 -08003409 struct net_device *dev,
3410 unsigned int pref)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003411{
Thomas Graf86872cb2006-08-22 00:01:08 -07003412 struct fib6_config cfg = {
David Ahernca254492015-10-12 11:47:10 -07003413 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
Rami Rosen238fc7e2008-02-09 23:43:11 -08003414 .fc_metric = IP6_RT_PRIO_USER,
Thomas Graf86872cb2006-08-22 00:01:08 -07003415 .fc_ifindex = dev->ifindex,
3416 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3417 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
Xin Longb91d5322017-08-03 14:13:46 +08003418 .fc_protocol = RTPROT_RA,
David Aherne8478e82018-04-17 17:33:13 -07003419 .fc_type = RTN_UNICAST,
Eric W. Biederman15e47302012-09-07 20:12:54 +00003420 .fc_nlinfo.portid = 0,
Daniel Lezcano55786892008-03-04 13:47:47 -08003421 .fc_nlinfo.nlh = NULL,
David Ahernafb1d4b52018-04-17 17:33:11 -07003422 .fc_nlinfo.nl_net = net,
Thomas Graf86872cb2006-08-22 00:01:08 -07003423 };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003424
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00003425 cfg.fc_gateway = *gwaddr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003426
David Ahern333c4302017-05-21 10:12:04 -06003427 if (!ip6_route_add(&cfg, NULL)) {
David Ahern830218c2016-10-24 10:52:35 -07003428 struct fib6_table *table;
3429
3430 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3431 if (table)
3432 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3433 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003434
David Ahernafb1d4b52018-04-17 17:33:11 -07003435 return rt6_get_dflt_router(net, gwaddr, dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436}
3437
/* Remove every RA-learned default route (RTF_DEFAULT or RTF_ADDRCONF)
 * from @table, except on interfaces whose accept_ra is 2.
 *
 * The walk runs under rcu_read_lock() but deletion cannot, so each
 * victim is pinned with dst_hold_safe(), RCU is dropped, the route is
 * deleted, and the scan restarts from the top — the tree may have
 * changed while unlocked.  Finally the table's DFLT_ROUTER flag is
 * cleared.
 */
static void __rt6_purge_dflt_routers(struct net *net,
				     struct fib6_table *table)
{
	struct rt6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
			if (dst_hold_safe(&rt->dst)) {
				rcu_read_unlock();
				ip6_del_rt(net, rt);
			} else {
				/* route already being freed; just rescan */
				rcu_read_unlock();
			}
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}
3461
/* Purge RA-learned default routers from every FIB table in @net that has
 * the RT6_TABLE_HAS_DFLT_ROUTER hint set.  Walks the table hash under RCU.
 */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(net, table);
		}
	}

	rcu_read_unlock();
}
3480
/* Translate the legacy ioctl route request (struct in6_rtmsg) into the
 * common struct fib6_config consumed by ip6_route_add()/ip6_route_del().
 * The table is taken from the l3mdev of the requested ifindex when the
 * device is enslaved, otherwise RT6_TABLE_MAIN.
 */
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
{
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
			 : RT6_TABLE_MAIN;
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;
	cfg->fc_type = rtmsg->rtmsg_type;

	cfg->fc_nlinfo.nl_net = net;

	cfg->fc_dst = rtmsg->rtmsg_dst;
	cfg->fc_src = rtmsg->rtmsg_src;
	cfg->fc_gateway = rtmsg->rtmsg_gateway;
}
3503
/* Handle the legacy SIOCADDRT/SIOCDELRT route ioctls.
 *
 * Requires CAP_NET_ADMIN in the netns user namespace.  Returns 0 on
 * success, -EPERM / -EFAULT / -EINVAL or the ip6_route_add/del error.
 * Route table mutation is serialized under the RTNL lock.
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		/* copy_from_user() returns the number of bytes NOT copied;
		 * any nonzero result means the user buffer was bad.
		 */
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}
3540
3541/*
3542 * Drop the packet on the floor
3543 */
3544
/* Drop @skb, bump the matching SNMP no-route counter and send an ICMPv6
 * destination-unreachable with @code back to the sender.
 * @ipstats_mib_noroutes selects IPSTATS_MIB_INNOROUTES (forward/input path)
 * or IPSTATS_MIB_OUTNOROUTES (output path).  Always returns 0 (packet
 * consumed).
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			/* Unspecified destination counts as an address
			 * error, not a routing failure.
			 */
			IP6_INC_STATS(dev_net(dst->dev),
				      __in6_dev_get_safely(skb->dev),
				      IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}
3568
/* dst input hook for blackhole/unreachable routes on the receive path. */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}
3573
/* dst output hook for blackhole/unreachable routes on the transmit path. */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
3579
/* dst input hook for prohibit routes: admin-prohibited ICMP error. */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}
3584
/* dst output hook for prohibit routes: admin-prohibited ICMP error. */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
3590
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591/*
3592 * Allocate a dst for local (unicast / anycast) address.
3593 */
3594
/* Allocate the host route (/128) installed for a local unicast or anycast
 * address during address configuration.
 *
 * @anycast selects RTN_ANYCAST/RTF_ANYCAST vs RTN_LOCAL/RTF_LOCAL.  The
 * route goes into the local table (or the l3mdev table when the device is
 * enslaved).  Returns the new rt6_info or ERR_PTR(-ENOMEM); holds a
 * reference on @idev.
 */
struct rt6_info *addrconf_dst_alloc(struct net *net,
				    struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
{
	u32 tb_id;
	struct net_device *dev = idev->dev;
	struct rt6_info *rt;

	rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	in6_dev_hold(idev);
	rt->rt6i_idev = idev;

	rt->dst.flags |= DST_HOST;
	rt->rt6i_protocol = RTPROT_KERNEL;
	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast) {
		rt->fib6_type = RTN_ANYCAST;
		rt->rt6i_flags |= RTF_ANYCAST;
	} else {
		rt->fib6_type = RTN_LOCAL;
		rt->rt6i_flags |= RTF_LOCAL;
	}

	/* Host route: gateway and destination are the address itself. */
	rt->fib6_nh.nh_gw = *addr;
	rt->fib6_nh.nh_dev = dev;
	rt->rt6i_gateway = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	rt->rt6i_table = fib6_get_table(net, tb_id);

	return rt;
}
3632
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* restrict to this device; NULL = any */
	struct net *net;	/* owning netns */
	struct in6_addr *addr;	/* the address being removed */
};
3639
/* fib6_clean_all() callback: clear the preferred-source setting of any
 * route that references the deleted address (on the given device, or on
 * any device when arg->dev is NULL).  Always returns 0 so the walk keeps
 * the route and continues.
 */
static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	if (((void *)rt->fib6_nh.nh_dev == dev || !dev) &&
	    rt != net->ipv6.fib6_null_entry &&
	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		spin_lock_bh(&rt6_exception_lock);
		/* remove prefsrc entry */
		rt->rt6i_prefsrc.plen = 0;
		/* need to update cache as well */
		rt6_exceptions_remove_prefsrc(rt);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}
3658
/* Called when address @ifp is removed: scrub it from the prefsrc field of
 * every route in the netns that still points at it.
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
3669
/* Flags identifying an RA-learned default router entry. */
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)

/* Remove routers and update dst entries when gateway turn into host. */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;

	/* Returning -1 tells the fib6 walker to delete this route. */
	if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
	    ipv6_addr_equal(gateway, &rt->fib6_nh.nh_gw)) {
		return -1;
	}

	/* Further clean up cached routes in exception table.
	 * This is needed because cached route may have a different
	 * gateway than its 'parent' in the case of an ip redirect.
	 */
	rt6_exceptions_clean_tohost(rt, gateway);

	return 0;
}
3690
/* Drop default-router entries (and clean cached exceptions) for a gateway
 * address that has become a local host address.
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
3695
/* Argument for the netdev-event fib6 walkers.  The union reflects the two
 * mutually exclusive users: rt6_sync_up() passes nexthop flags to clear,
 * rt6_sync_down_dev() passes the netdev notifier event.
 */
struct arg_netdev_event {
	const struct net_device *dev;
	union {
		unsigned int nh_flags;
		unsigned long event;
	};
};
3703
/* Find the first route in @rt's fib6 node leaf list that belongs to the
 * same ECMP group (same metric, qualifies for ECMP).  Must be called with
 * the table lock held — enforced via lockdep on every dereference.
 * Returns NULL if no sibling is found (callers treat that as a bug).
 */
static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt)
{
	struct rt6_info *iter;
	struct fib6_node *fn;

	fn = rcu_dereference_protected(rt->rt6i_node,
			lockdep_is_held(&rt->rt6i_table->tb6_lock));
	iter = rcu_dereference_protected(fn->leaf,
			lockdep_is_held(&rt->rt6i_table->tb6_lock));
	while (iter) {
		if (iter->rt6i_metric == rt->rt6i_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->rt6_next,
				lockdep_is_held(&rt->rt6i_table->tb6_lock));
	}

	return NULL;
}
3723
3724static bool rt6_is_dead(const struct rt6_info *rt)
3725{
David Ahern5e670d82018-04-17 17:33:14 -07003726 if (rt->fib6_nh.nh_flags & RTNH_F_DEAD ||
3727 (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN &&
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003728 rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
3729 return true;
3730
3731 return false;
3732}
3733
/* Sum the nexthop weights of @rt and all of its ECMP siblings, counting
 * only nexthops that are still alive (see rt6_is_dead()).
 */
static int rt6_multipath_total_weight(const struct rt6_info *rt)
{
	struct rt6_info *iter;
	int total = 0;

	if (!rt6_is_dead(rt))
		total += rt->fib6_nh.nh_weight;

	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
		if (!rt6_is_dead(iter))
			total += iter->fib6_nh.nh_weight;
	}

	return total;
}
3749
3750static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total)
3751{
3752 int upper_bound = -1;
3753
3754 if (!rt6_is_dead(rt)) {
David Ahern5e670d82018-04-17 17:33:14 -07003755 *weight += rt->fib6_nh.nh_weight;
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003756 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
3757 total) - 1;
3758 }
David Ahern5e670d82018-04-17 17:33:14 -07003759 atomic_set(&rt->fib6_nh.nh_upper_bound, upper_bound);
Ido Schimmeld7dedee2018-01-09 16:40:25 +02003760}
3761
/* Recompute the cumulative hash upper bounds for @rt and all its siblings,
 * in list order, given the @total weight of live nexthops.
 */
static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total)
{
	struct rt6_info *iter;
	int weight = 0;

	rt6_upper_bound_set(rt, &weight, total);

	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		rt6_upper_bound_set(iter, &weight, total);
}
3772
/* Redistribute the ECMP hash space of @rt's multipath group across the
 * nexthops that are still alive.  No-op for non-multipath routes or for
 * groups already marked for flushing.
 */
void rt6_multipath_rebalance(struct rt6_info *rt)
{
	struct rt6_info *first;
	int total;

	/* In case the entire multipath route was marked for flushing,
	 * then there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
	if (!rt->rt6i_nsiblings || rt->should_flush)
		return;

	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
	first = rt6_multipath_first_sibling(rt);
	if (WARN_ON_ONCE(!first))
		return;

	total = rt6_multipath_total_weight(first);
	rt6_multipath_upper_bound_set(first, total);
}
3796
/* fib6_clean_all() callback for rt6_sync_up(): clear the requested nexthop
 * flags on routes using the device that came up, bump the tree sernum so
 * stale dst cache entries are invalidated, and rebalance the ECMP group.
 * Always returns 0 (never deletes routes).
 */
static int fib6_ifup(struct rt6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	struct net *net = dev_net(arg->dev);

	if (rt != net->ipv6.fib6_null_entry && rt->fib6_nh.nh_dev == arg->dev) {
		rt->fib6_nh.nh_flags &= ~arg->nh_flags;
		fib6_update_sernum_upto_root(net, rt);
		rt6_multipath_rebalance(rt);
	}

	return 0;
}
3810
/* Device (or its carrier) came back up: clear @nh_flags (RTNH_F_DEAD
 * and/or RTNH_F_LINKDOWN) from all routes via @dev.  When reviving from
 * DEAD with carrier present, LINKDOWN is cleared as well.
 */
void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.nh_flags = nh_flags,
		},
	};

	if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
		arg.nh_flags |= RTNH_F_LINKDOWN;

	fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
}
3825
Ido Schimmel1de178e2018-01-07 12:45:15 +02003826static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
3827 const struct net_device *dev)
3828{
3829 struct rt6_info *iter;
3830
David Ahern5e670d82018-04-17 17:33:14 -07003831 if (rt->fib6_nh.nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003832 return true;
3833 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
David Ahern5e670d82018-04-17 17:33:14 -07003834 if (iter->fib6_nh.nh_dev == dev)
Ido Schimmel1de178e2018-01-07 12:45:15 +02003835 return true;
3836
3837 return false;
3838}
3839
/* Mark @rt and every sibling in its multipath group for deletion; the
 * fib6 walker removes each one as it visits it (see fib6_ifdown()).
 */
static void rt6_multipath_flush(struct rt6_info *rt)
{
	struct rt6_info *iter;

	rt->should_flush = 1;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		iter->should_flush = 1;
}
3848
/* Count the nexthops in @rt's multipath group that are dead or about to
 * be: already flagged RTNH_F_DEAD, or using the device going down.
 */
static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
					     const struct net_device *down_dev)
{
	struct rt6_info *iter;
	unsigned int dead = 0;

	if (rt->fib6_nh.nh_dev == down_dev ||
	    rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		dead++;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		if (iter->fib6_nh.nh_dev == down_dev ||
		    iter->fib6_nh.nh_flags & RTNH_F_DEAD)
			dead++;

	return dead;
}
3865
/* Set @nh_flags on every nexthop of @rt's multipath group that uses @dev. */
static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
				       const struct net_device *dev,
				       unsigned int nh_flags)
{
	struct rt6_info *iter;

	if (rt->fib6_nh.nh_dev == dev)
		rt->fib6_nh.nh_flags |= nh_flags;
	list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
		if (iter->fib6_nh.nh_dev == dev)
			iter->fib6_nh.nh_flags |= nh_flags;
}
3878
/* called with write lock held for table with rt */
/* fib6_clean_all() callback for rt6_sync_down_dev().  Walker return value
 * protocol: 0 = keep route, -1 = delete route, -2 = delete the sibling
 * routes that were marked via rt6_multipath_flush().
 */
static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
{
	const struct arg_netdev_event *arg = p_arg;
	const struct net_device *dev = arg->dev;
	struct net *net = dev_net(dev);

	if (rt == net->ipv6.fib6_null_entry)
		return 0;

	switch (arg->event) {
	case NETDEV_UNREGISTER:
		/* Device is going away: remove every route through it. */
		return rt->fib6_nh.nh_dev == dev ? -1 : 0;
	case NETDEV_DOWN:
		if (rt->should_flush)
			return -1;
		if (!rt->rt6i_nsiblings)
			return rt->fib6_nh.nh_dev == dev ? -1 : 0;
		if (rt6_multipath_uses_dev(rt, dev)) {
			unsigned int count;

			count = rt6_multipath_dead_count(rt, dev);
			/* All nexthops dead: flush the whole group.
			 * Otherwise just mark the affected nexthops and
			 * rebalance the remaining ones.
			 */
			if (rt->rt6i_nsiblings + 1 == count) {
				rt6_multipath_flush(rt);
				return -1;
			}
			rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
						   RTNH_F_LINKDOWN);
			fib6_update_sernum(net, rt);
			rt6_multipath_rebalance(rt);
		}
		return -2;
	case NETDEV_CHANGE:
		/* Carrier loss: local/anycast routes are unaffected. */
		if (rt->fib6_nh.nh_dev != dev ||
		    rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
			break;
		rt->fib6_nh.nh_flags |= RTNH_F_LINKDOWN;
		rt6_multipath_rebalance(rt);
		break;
	}

	return 0;
}
3922
/* Propagate a netdev down/unregister/change @event to all IPv6 routes
 * using @dev (see fib6_ifdown() for per-event semantics).
 */
void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
{
	struct arg_netdev_event arg = {
		.dev = dev,
		{
			.event = event,
		},
	};

	fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
}
3934
/* Full IPv6 teardown for a device going down: sync the FIB, flush the
 * uncached-route list, and drop the device's neighbour entries.
 */
void rt6_disable_ip(struct net_device *dev, unsigned long event)
{
	rt6_sync_down_dev(dev, event);
	rt6_uncached_list_flush_dev(dev_net(dev), dev);
	neigh_ifdown(&nd_tbl, dev);
}
3941
/* Argument for rt6_mtu_change_route(): device whose MTU changed and the
 * new MTU value.
 */
struct rt6_mtu_change_arg {
	struct net_device *dev;
	unsigned int mtu;
};
3946
/* fib6_clean_all() callback for rt6_mtu_change(): update the RTAX_MTU
 * metric of routes through the device whose MTU changed, unless the
 * metric is administratively locked.  Cached (exception) routes are
 * updated as well.  Always returns 0.
 */
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase
	   update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	if (rt->fib6_nh.nh_dev == arg->dev &&
	    !fib6_metric_locked(rt, RTAX_MTU)) {
		u32 mtu = rt->fib6_pmtu;

		/* Shrink if the route's MTU exceeds the new device MTU;
		 * grow only if it was tracking the old device MTU.
		 */
		if (mtu >= arg->mtu ||
		    (mtu < arg->mtu && mtu == idev->cnf.mtu6))
			fib6_metric_set(rt, RTAX_MTU, arg->mtu);

		spin_lock_bh(&rt6_exception_lock);
		rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}
3981
/* Device MTU changed: walk all routes in the netns and update their MTU
 * metric where appropriate (see rt6_mtu_change_route()).
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}
3991
/* Netlink attribute validation policy for RTM_{NEW,DEL,GET}ROUTE on
 * AF_INET6; consumed by nlmsg_parse() in rtm_to_fib6_config().
 */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]               = { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};
4006
/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into a
 * struct fib6_config.  Validates attributes against rtm_ipv6_policy and
 * the lwtunnel encap types.  Returns 0 on success or a negative errno;
 * parse problems may be reported through @extack.
 *
 * Note: cfg->fc_mx / fc_mp keep pointers into the skb's attribute data,
 * so @skb must outlive the returned config.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		/* Prefixes may be sent truncated to their prefix length. */
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	/* RTA_TABLE overrides the 8-bit table id from the rtmsg header. */
	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* Invalid router preference falls back to medium (RFC 4191). */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}
4132
/* One pending nexthop collected while building a multipath route from an
 * RTA_MULTIPATH netlink request.
 */
struct rt6_nh {
	struct rt6_info *rt6_info;	/* route built for this nexthop */
	struct fib6_config r_cfg;	/* per-nexthop config snapshot */
	struct list_head next;		/* link in rt6_nh_list */
};
4138
/* Log every nexthop of a multipath replace request that failed midway,
 * since the table may now be inconsistent with what userspace asked for.
 */
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}
4149
/* Append @rt (with its per-nexthop config @r_cfg) to @rt6_nh_list.
 * Returns -EEXIST if an equivalent nexthop is already queued, -ENOMEM on
 * allocation failure, or the ip6_convert_metrics() error.  On success the
 * new entry takes a copy of @r_cfg; @rt ownership stays with the caller.
 */
static int ip6_route_info_append(struct net *net,
				 struct list_head *rt6_nh_list,
				 struct rt6_info *rt, struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if rt6_info already exists */
		if (rt6_duplicate_nexthop(nh->rt6_info, rt))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->rt6_info = rt;
	err = ip6_convert_metrics(net, rt, r_cfg);
	if (err) {
		kfree(nh);
		return err;
	}
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}
4177
David Ahern3b1137f2017-02-02 12:37:10 -08004178static void ip6_route_mpath_notify(struct rt6_info *rt,
4179 struct rt6_info *rt_last,
4180 struct nl_info *info,
4181 __u16 nlflags)
4182{
4183 /* if this is an APPEND route, then rt points to the first route
4184 * inserted and rt_last points to last route inserted. Userspace
4185 * wants a consistent dump of the route which starts at the first
4186 * nexthop. Since sibling routes are always added at the end of
4187 * the list, find the first sibling of the last route appended
4188 */
4189 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
4190 rt = list_first_entry(&rt_last->rt6i_siblings,
4191 struct rt6_info,
4192 rt6i_siblings);
4193 }
4194
4195 if (rt)
4196 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
4197}
4198
David Ahern333c4302017-05-21 10:12:04 -06004199static int ip6_route_multipath_add(struct fib6_config *cfg,
4200 struct netlink_ext_ack *extack)
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004201{
David Ahern3b1137f2017-02-02 12:37:10 -08004202 struct rt6_info *rt_notif = NULL, *rt_last = NULL;
4203 struct nl_info *info = &cfg->fc_nlinfo;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004204 struct fib6_config r_cfg;
4205 struct rtnexthop *rtnh;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004206 struct rt6_info *rt;
4207 struct rt6_nh *err_nh;
4208 struct rt6_nh *nh, *nh_safe;
David Ahern3b1137f2017-02-02 12:37:10 -08004209 __u16 nlflags;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004210 int remaining;
4211 int attrlen;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004212 int err = 1;
4213 int nhn = 0;
4214 int replace = (cfg->fc_nlinfo.nlh &&
4215 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4216 LIST_HEAD(rt6_nh_list);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004217
David Ahern3b1137f2017-02-02 12:37:10 -08004218 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4219 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4220 nlflags |= NLM_F_APPEND;
4221
Michal Kubeček35f1b4e2015-05-18 20:53:55 +02004222 remaining = cfg->fc_mp_len;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004223 rtnh = (struct rtnexthop *)cfg->fc_mp;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004224
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004225 /* Parse a Multipath Entry and build a list (rt6_nh_list) of
4226 * rt6_info structs per nexthop
4227 */
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004228 while (rtnh_ok(rtnh, remaining)) {
4229 memcpy(&r_cfg, cfg, sizeof(*cfg));
4230 if (rtnh->rtnh_ifindex)
4231 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4232
4233 attrlen = rtnh_attrlen(rtnh);
4234 if (attrlen > 0) {
4235 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4236
4237 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4238 if (nla) {
Jiri Benc67b61f62015-03-29 16:59:26 +02004239 r_cfg.fc_gateway = nla_get_in6_addr(nla);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004240 r_cfg.fc_flags |= RTF_GATEWAY;
4241 }
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004242 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4243 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4244 if (nla)
4245 r_cfg.fc_encap_type = nla_get_u16(nla);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004246 }
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004247
David Ahern68e2ffd2018-03-20 10:06:59 -07004248 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
David Ahern333c4302017-05-21 10:12:04 -06004249 rt = ip6_route_info_create(&r_cfg, extack);
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07004250 if (IS_ERR(rt)) {
4251 err = PTR_ERR(rt);
4252 rt = NULL;
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004253 goto cleanup;
Roopa Prabhu8c5b83f2015-10-10 08:26:36 -07004254 }
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004255
David Ahern5e670d82018-04-17 17:33:14 -07004256 rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
Ido Schimmel398958a2018-01-09 16:40:28 +02004257
David Ahernd4ead6b2018-04-17 17:33:16 -07004258 err = ip6_route_info_append(info->nl_net, &rt6_nh_list,
4259 rt, &r_cfg);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004260 if (err) {
Wei Wang587fea72017-06-17 10:42:36 -07004261 dst_release_immediate(&rt->dst);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004262 goto cleanup;
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004263 }
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004264
4265 rtnh = rtnh_next(rtnh, &remaining);
4266 }
4267
David Ahern3b1137f2017-02-02 12:37:10 -08004268 /* for add and replace send one notification with all nexthops.
4269 * Skip the notification in fib6_add_rt2node and send one with
4270 * the full route when done
4271 */
4272 info->skip_notify = 1;
4273
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004274 err_nh = NULL;
4275 list_for_each_entry(nh, &rt6_nh_list, next) {
David Ahern3b1137f2017-02-02 12:37:10 -08004276 rt_last = nh->rt6_info;
David Ahernd4ead6b2018-04-17 17:33:16 -07004277 err = __ip6_ins_rt(nh->rt6_info, info, extack);
David Ahern3b1137f2017-02-02 12:37:10 -08004278 /* save reference to first route for notification */
4279 if (!rt_notif && !err)
4280 rt_notif = nh->rt6_info;
4281
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004282 /* nh->rt6_info is used or freed at this point, reset to NULL*/
4283 nh->rt6_info = NULL;
4284 if (err) {
4285 if (replace && nhn)
4286 ip6_print_replace_route_err(&rt6_nh_list);
4287 err_nh = nh;
4288 goto add_errout;
4289 }
4290
Nicolas Dichtel1a724182012-11-01 22:58:22 +00004291 /* Because each route is added like a single route we remove
Michal Kubeček27596472015-05-18 20:54:00 +02004292 * these flags after the first nexthop: if there is a collision,
4293 * we have already failed to add the first nexthop:
4294 * fib6_add_rt2node() has rejected it; when replacing, old
4295 * nexthops have been replaced by first new, the rest should
4296 * be added to it.
Nicolas Dichtel1a724182012-11-01 22:58:22 +00004297 */
Michal Kubeček27596472015-05-18 20:54:00 +02004298 cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
4299 NLM_F_REPLACE);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004300 nhn++;
4301 }
4302
David Ahern3b1137f2017-02-02 12:37:10 -08004303 /* success ... tell user about new route */
4304 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004305 goto cleanup;
4306
4307add_errout:
David Ahern3b1137f2017-02-02 12:37:10 -08004308 /* send notification for routes that were added so that
4309 * the delete notifications sent by ip6_route_del are
4310 * coherent
4311 */
4312 if (rt_notif)
4313 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4314
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004315 /* Delete routes that were already added */
4316 list_for_each_entry(nh, &rt6_nh_list, next) {
4317 if (err_nh == nh)
4318 break;
David Ahern333c4302017-05-21 10:12:04 -06004319 ip6_route_del(&nh->r_cfg, extack);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004320 }
4321
4322cleanup:
4323 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
Wei Wang587fea72017-06-17 10:42:36 -07004324 if (nh->rt6_info)
4325 dst_release_immediate(&nh->rt6_info->dst);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004326 list_del(&nh->next);
4327 kfree(nh);
4328 }
4329
4330 return err;
4331}
4332
David Ahern333c4302017-05-21 10:12:04 -06004333static int ip6_route_multipath_del(struct fib6_config *cfg,
4334 struct netlink_ext_ack *extack)
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004335{
4336 struct fib6_config r_cfg;
4337 struct rtnexthop *rtnh;
4338 int remaining;
4339 int attrlen;
4340 int err = 1, last_err = 0;
4341
4342 remaining = cfg->fc_mp_len;
4343 rtnh = (struct rtnexthop *)cfg->fc_mp;
4344
4345 /* Parse a Multipath Entry */
4346 while (rtnh_ok(rtnh, remaining)) {
4347 memcpy(&r_cfg, cfg, sizeof(*cfg));
4348 if (rtnh->rtnh_ifindex)
4349 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4350
4351 attrlen = rtnh_attrlen(rtnh);
4352 if (attrlen > 0) {
4353 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4354
4355 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4356 if (nla) {
4357 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4358 r_cfg.fc_flags |= RTF_GATEWAY;
4359 }
4360 }
David Ahern333c4302017-05-21 10:12:04 -06004361 err = ip6_route_del(&r_cfg, extack);
Roopa Prabhu6b9ea5a2015-09-08 10:53:04 -07004362 if (err)
4363 last_err = err;
4364
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004365 rtnh = rtnh_next(rtnh, &remaining);
4366 }
4367
4368 return last_err;
4369}
4370
David Ahernc21ef3e2017-04-16 09:48:24 -07004371static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4372 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004373{
Thomas Graf86872cb2006-08-22 00:01:08 -07004374 struct fib6_config cfg;
4375 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004376
David Ahern333c4302017-05-21 10:12:04 -06004377 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07004378 if (err < 0)
4379 return err;
4380
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004381 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06004382 return ip6_route_multipath_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08004383 else {
4384 cfg.fc_delete_all_nh = 1;
David Ahern333c4302017-05-21 10:12:04 -06004385 return ip6_route_del(&cfg, extack);
David Ahern0ae81332017-02-02 12:37:08 -08004386 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387}
4388
David Ahernc21ef3e2017-04-16 09:48:24 -07004389static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4390 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004391{
Thomas Graf86872cb2006-08-22 00:01:08 -07004392 struct fib6_config cfg;
4393 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004394
David Ahern333c4302017-05-21 10:12:04 -06004395 err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
Thomas Graf86872cb2006-08-22 00:01:08 -07004396 if (err < 0)
4397 return err;
4398
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004399 if (cfg.fc_mp)
David Ahern333c4302017-05-21 10:12:04 -06004400 return ip6_route_multipath_add(&cfg, extack);
Nicolas Dichtel51ebd312012-10-22 03:42:09 +00004401 else
David Ahern333c4302017-05-21 10:12:04 -06004402 return ip6_route_add(&cfg, extack);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004403}
4404
David Ahernbeb1afac52017-02-02 12:37:09 -08004405static size_t rt6_nlmsg_size(struct rt6_info *rt)
Thomas Graf339bf982006-11-10 14:10:15 -08004406{
David Ahernbeb1afac52017-02-02 12:37:09 -08004407 int nexthop_len = 0;
4408
4409 if (rt->rt6i_nsiblings) {
4410 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4411 + NLA_ALIGN(sizeof(struct rtnexthop))
4412 + nla_total_size(16) /* RTA_GATEWAY */
David Ahern5e670d82018-04-17 17:33:14 -07004413 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate);
David Ahernbeb1afac52017-02-02 12:37:09 -08004414
4415 nexthop_len *= rt->rt6i_nsiblings;
4416 }
4417
Thomas Graf339bf982006-11-10 14:10:15 -08004418 return NLMSG_ALIGN(sizeof(struct rtmsg))
4419 + nla_total_size(16) /* RTA_SRC */
4420 + nla_total_size(16) /* RTA_DST */
4421 + nla_total_size(16) /* RTA_GATEWAY */
4422 + nla_total_size(16) /* RTA_PREFSRC */
4423 + nla_total_size(4) /* RTA_TABLE */
4424 + nla_total_size(4) /* RTA_IIF */
4425 + nla_total_size(4) /* RTA_OIF */
4426 + nla_total_size(4) /* RTA_PRIORITY */
Noriaki TAKAMIYA6a2b9ce2007-01-23 22:09:41 -08004427 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
Daniel Borkmannea697632015-01-05 23:57:47 +01004428 + nla_total_size(sizeof(struct rta_cacheinfo))
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004429 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004430 + nla_total_size(1) /* RTA_PREF */
David Ahern5e670d82018-04-17 17:33:14 -07004431 + lwtunnel_get_encap_size(rt->fib6_nh.nh_lwtstate)
David Ahernbeb1afac52017-02-02 12:37:09 -08004432 + nexthop_len;
4433}
4434
/* Emit the nexthop attributes (gateway, oif, encap) for @rt into @skb and
 * accumulate the RTNH_F_* state into *@flags.
 *
 * @skip_oif: true when called for an entry inside RTA_MULTIPATH, where the
 *            ifindex is carried by the rtnexthop struct instead of RTA_OIF.
 * Returns 0 on success, -EMSGSIZE if the skb ran out of room.
 */
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
			    unsigned int *flags, bool skip_oif)
{
	if (rt->fib6_nh.nh_flags & RTNH_F_DEAD)
		*flags |= RTNH_F_DEAD;

	if (rt->fib6_nh.nh_flags & RTNH_F_LINKDOWN) {
		*flags |= RTNH_F_LINKDOWN;
		/* with this sysctl a linkdown nexthop is treated as dead */
		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
			*flags |= RTNH_F_DEAD;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->fib6_nh.nh_gw) < 0)
			goto nla_put_failure;
	}

	*flags |= (rt->fib6_nh.nh_flags & RTNH_F_ONLINK);
	if (rt->fib6_nh.nh_flags & RTNH_F_OFFLOAD)
		*flags |= RTNH_F_OFFLOAD;

	/* not needed for multipath encoding b/c it has a rtnexthop struct */
	if (!skip_oif && rt->fib6_nh.nh_dev &&
	    nla_put_u32(skb, RTA_OIF, rt->fib6_nh.nh_dev->ifindex))
		goto nla_put_failure;

	if (rt->fib6_nh.nh_lwtstate &&
	    lwtunnel_fill_encap(skb, rt->fib6_nh.nh_lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
4470
David Ahern5be083c2017-03-06 15:57:31 -08004471/* add multipath next hop */
David Ahernbeb1afac52017-02-02 12:37:09 -08004472static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
4473{
David Ahern5e670d82018-04-17 17:33:14 -07004474 const struct net_device *dev = rt->fib6_nh.nh_dev;
David Ahernbeb1afac52017-02-02 12:37:09 -08004475 struct rtnexthop *rtnh;
4476 unsigned int flags = 0;
4477
4478 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4479 if (!rtnh)
4480 goto nla_put_failure;
4481
David Ahern5e670d82018-04-17 17:33:14 -07004482 rtnh->rtnh_hops = rt->fib6_nh.nh_weight - 1;
4483 rtnh->rtnh_ifindex = dev ? dev->ifindex : 0;
David Ahernbeb1afac52017-02-02 12:37:09 -08004484
David Ahern5be083c2017-03-06 15:57:31 -08004485 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08004486 goto nla_put_failure;
4487
4488 rtnh->rtnh_flags = flags;
4489
4490 /* length of rtnetlink header + attributes */
4491 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;
4492
4493 return 0;
4494
4495nla_put_failure:
4496 return -EMSGSIZE;
Thomas Graf339bf982006-11-10 14:10:15 -08004497}
4498
David Ahernd4ead6b2018-04-17 17:33:16 -07004499static int rt6_fill_node(struct net *net, struct sk_buff *skb,
4500 struct rt6_info *rt, struct dst_entry *dst,
4501 struct in6_addr *dest, struct in6_addr *src,
Eric W. Biederman15e47302012-09-07 20:12:54 +00004502 int iif, int type, u32 portid, u32 seq,
David Ahernf8cfe2c2017-01-17 15:51:08 -08004503 unsigned int flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004504{
4505 struct rtmsg *rtm;
Thomas Graf2d7202b2006-08-22 00:01:27 -07004506 struct nlmsghdr *nlh;
David Ahernd4ead6b2018-04-17 17:33:16 -07004507 long expires = 0;
4508 u32 *pmetrics;
Patrick McHardy9e762a42006-08-10 23:09:48 -07004509 u32 table;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510
Eric W. Biederman15e47302012-09-07 20:12:54 +00004511 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
David S. Miller38308472011-12-03 18:02:47 -05004512 if (!nlh)
Patrick McHardy26932562007-01-31 23:16:40 -08004513 return -EMSGSIZE;
Thomas Graf2d7202b2006-08-22 00:01:27 -07004514
4515 rtm = nlmsg_data(nlh);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004516 rtm->rtm_family = AF_INET6;
4517 rtm->rtm_dst_len = rt->rt6i_dst.plen;
4518 rtm->rtm_src_len = rt->rt6i_src.plen;
4519 rtm->rtm_tos = 0;
Thomas Grafc71099a2006-08-04 23:20:06 -07004520 if (rt->rt6i_table)
Patrick McHardy9e762a42006-08-10 23:09:48 -07004521 table = rt->rt6i_table->tb6_id;
Thomas Grafc71099a2006-08-04 23:20:06 -07004522 else
Patrick McHardy9e762a42006-08-10 23:09:48 -07004523 table = RT6_TABLE_UNSPEC;
4524 rtm->rtm_table = table;
David S. Millerc78679e2012-04-01 20:27:33 -04004525 if (nla_put_u32(skb, RTA_TABLE, table))
4526 goto nla_put_failure;
David Aherne8478e82018-04-17 17:33:13 -07004527
4528 rtm->rtm_type = rt->fib6_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004529 rtm->rtm_flags = 0;
4530 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4531 rtm->rtm_protocol = rt->rt6i_protocol;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004532
David S. Miller38308472011-12-03 18:02:47 -05004533 if (rt->rt6i_flags & RTF_CACHE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004534 rtm->rtm_flags |= RTM_F_CLONED;
4535
David Ahernd4ead6b2018-04-17 17:33:16 -07004536 if (dest) {
4537 if (nla_put_in6_addr(skb, RTA_DST, dest))
David S. Millerc78679e2012-04-01 20:27:33 -04004538 goto nla_put_failure;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004539 rtm->rtm_dst_len = 128;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004540 } else if (rtm->rtm_dst_len)
Jiri Benc930345e2015-03-29 16:59:25 +02004541 if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
David S. Millerc78679e2012-04-01 20:27:33 -04004542 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543#ifdef CONFIG_IPV6_SUBTREES
4544 if (src) {
Jiri Benc930345e2015-03-29 16:59:25 +02004545 if (nla_put_in6_addr(skb, RTA_SRC, src))
David S. Millerc78679e2012-04-01 20:27:33 -04004546 goto nla_put_failure;
YOSHIFUJI Hideaki1ab14572007-02-09 23:24:49 +09004547 rtm->rtm_src_len = 128;
David S. Millerc78679e2012-04-01 20:27:33 -04004548 } else if (rtm->rtm_src_len &&
Jiri Benc930345e2015-03-29 16:59:25 +02004549 nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
David S. Millerc78679e2012-04-01 20:27:33 -04004550 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004551#endif
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +09004552 if (iif) {
4553#ifdef CONFIG_IPV6_MROUTE
4554 if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
David Ahernfd61c6b2017-01-17 15:51:07 -08004555 int err = ip6mr_get_route(net, skb, rtm, portid);
Nikolay Aleksandrov2cf75072016-09-25 23:08:31 +02004556
David Ahernfd61c6b2017-01-17 15:51:07 -08004557 if (err == 0)
4558 return 0;
4559 if (err < 0)
4560 goto nla_put_failure;
YOSHIFUJI Hideaki7bc570c2008-04-03 09:22:53 +09004561 } else
4562#endif
David S. Millerc78679e2012-04-01 20:27:33 -04004563 if (nla_put_u32(skb, RTA_IIF, iif))
4564 goto nla_put_failure;
David Ahernd4ead6b2018-04-17 17:33:16 -07004565 } else if (dest) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004566 struct in6_addr saddr_buf;
David Ahernd4ead6b2018-04-17 17:33:16 -07004567 if (ip6_route_get_saddr(net, rt, dest, 0, &saddr_buf) == 0 &&
Jiri Benc930345e2015-03-29 16:59:25 +02004568 nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
David S. Millerc78679e2012-04-01 20:27:33 -04004569 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004570 }
Thomas Graf2d7202b2006-08-22 00:01:27 -07004571
Daniel Walterc3968a82011-04-13 21:10:57 +00004572 if (rt->rt6i_prefsrc.plen) {
4573 struct in6_addr saddr_buf;
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004574 saddr_buf = rt->rt6i_prefsrc.addr;
Jiri Benc930345e2015-03-29 16:59:25 +02004575 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
David S. Millerc78679e2012-04-01 20:27:33 -04004576 goto nla_put_failure;
Daniel Walterc3968a82011-04-13 21:10:57 +00004577 }
4578
David Ahernd4ead6b2018-04-17 17:33:16 -07004579 pmetrics = dst ? dst_metrics_ptr(dst) : rt->fib6_metrics->metrics;
4580 if (rtnetlink_put_metrics(skb, pmetrics) < 0)
Thomas Graf2d7202b2006-08-22 00:01:27 -07004581 goto nla_put_failure;
4582
David S. Millerc78679e2012-04-01 20:27:33 -04004583 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
4584 goto nla_put_failure;
Li Wei82539472012-07-29 16:01:30 +00004585
David Ahernbeb1afac52017-02-02 12:37:09 -08004586 /* For multipath routes, walk the siblings list and add
4587 * each as a nexthop within RTA_MULTIPATH.
4588 */
4589 if (rt->rt6i_nsiblings) {
4590 struct rt6_info *sibling, *next_sibling;
4591 struct nlattr *mp;
4592
4593 mp = nla_nest_start(skb, RTA_MULTIPATH);
4594 if (!mp)
4595 goto nla_put_failure;
4596
4597 if (rt6_add_nexthop(skb, rt) < 0)
4598 goto nla_put_failure;
4599
4600 list_for_each_entry_safe(sibling, next_sibling,
4601 &rt->rt6i_siblings, rt6i_siblings) {
4602 if (rt6_add_nexthop(skb, sibling) < 0)
4603 goto nla_put_failure;
4604 }
4605
4606 nla_nest_end(skb, mp);
4607 } else {
David Ahern5be083c2017-03-06 15:57:31 -08004608 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
David Ahernbeb1afac52017-02-02 12:37:09 -08004609 goto nla_put_failure;
4610 }
4611
David Ahern14895682018-04-17 17:33:17 -07004612 if (rt->rt6i_flags & RTF_EXPIRES) {
4613 expires = dst ? dst->expires : rt->expires;
4614 expires -= jiffies;
4615 }
YOSHIFUJI Hideaki69cdf8f2008-05-19 16:55:13 -07004616
David Ahernd4ead6b2018-04-17 17:33:16 -07004617 if (rtnl_put_cacheinfo(skb, dst, 0, expires, dst ? dst->error : 0) < 0)
Thomas Grafe3703b32006-11-27 09:27:07 -08004618 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004619
Lubomir Rintelc78ba6d2015-03-11 15:39:21 +01004620 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
4621 goto nla_put_failure;
4622
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004623
Johannes Berg053c0952015-01-16 22:09:00 +01004624 nlmsg_end(skb, nlh);
4625 return 0;
Thomas Graf2d7202b2006-08-22 00:01:27 -07004626
4627nla_put_failure:
Patrick McHardy26932562007-01-31 23:16:40 -08004628 nlmsg_cancel(skb, nlh);
4629 return -EMSGSIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004630}
4631
Patrick McHardy1b43af52006-08-10 23:11:17 -07004632int rt6_dump_route(struct rt6_info *rt, void *p_arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004633{
4634 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
David Ahern1f17e2f2017-01-26 13:54:08 -08004635 struct net *net = arg->net;
4636
David Ahern421842e2018-04-17 17:33:18 -07004637 if (rt == net->ipv6.fib6_null_entry)
David Ahern1f17e2f2017-01-26 13:54:08 -08004638 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004639
Thomas Graf2d7202b2006-08-22 00:01:27 -07004640 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4641 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
David Ahernf8cfe2c2017-01-17 15:51:08 -08004642
4643 /* user wants prefix routes only */
4644 if (rtm->rtm_flags & RTM_F_PREFIX &&
4645 !(rt->rt6i_flags & RTF_PREFIX_RT)) {
4646 /* success since this is not a prefix route */
4647 return 1;
4648 }
4649 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004650
David Ahernd4ead6b2018-04-17 17:33:16 -07004651 return rt6_fill_node(net, arg->skb, rt, NULL, NULL, NULL, 0,
4652 RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid,
4653 arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004654}
4655
David Ahernc21ef3e2017-04-16 09:48:24 -07004656static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4657 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004658{
YOSHIFUJI Hideaki3b1e0a62008-03-26 02:26:21 +09004659 struct net *net = sock_net(in_skb->sk);
Thomas Grafab364a62006-08-22 00:01:47 -07004660 struct nlattr *tb[RTA_MAX+1];
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004661 int err, iif = 0, oif = 0;
4662 struct dst_entry *dst;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004663 struct rt6_info *rt;
Thomas Grafab364a62006-08-22 00:01:47 -07004664 struct sk_buff *skb;
4665 struct rtmsg *rtm;
David S. Miller4c9483b2011-03-12 16:22:43 -05004666 struct flowi6 fl6;
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004667 bool fibmatch;
Thomas Grafab364a62006-08-22 00:01:47 -07004668
Johannes Bergfceb6432017-04-12 14:34:07 +02004669 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
David Ahernc21ef3e2017-04-16 09:48:24 -07004670 extack);
Thomas Grafab364a62006-08-22 00:01:47 -07004671 if (err < 0)
4672 goto errout;
4673
4674 err = -EINVAL;
David S. Miller4c9483b2011-03-12 16:22:43 -05004675 memset(&fl6, 0, sizeof(fl6));
Hannes Frederic Sowa38b70972016-06-11 20:08:19 +02004676 rtm = nlmsg_data(nlh);
4677 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004678 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
Thomas Grafab364a62006-08-22 00:01:47 -07004679
4680 if (tb[RTA_SRC]) {
4681 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4682 goto errout;
4683
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004684 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
Thomas Grafab364a62006-08-22 00:01:47 -07004685 }
4686
4687 if (tb[RTA_DST]) {
4688 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4689 goto errout;
4690
Alexey Dobriyan4e3fd7a2011-11-21 03:39:03 +00004691 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
Thomas Grafab364a62006-08-22 00:01:47 -07004692 }
4693
4694 if (tb[RTA_IIF])
4695 iif = nla_get_u32(tb[RTA_IIF]);
4696
4697 if (tb[RTA_OIF])
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004698 oif = nla_get_u32(tb[RTA_OIF]);
Thomas Grafab364a62006-08-22 00:01:47 -07004699
Lorenzo Colitti2e47b292014-05-15 16:38:41 -07004700 if (tb[RTA_MARK])
4701 fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);
4702
Lorenzo Colitti622ec2c2016-11-04 02:23:42 +09004703 if (tb[RTA_UID])
4704 fl6.flowi6_uid = make_kuid(current_user_ns(),
4705 nla_get_u32(tb[RTA_UID]));
4706 else
4707 fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4708
Thomas Grafab364a62006-08-22 00:01:47 -07004709 if (iif) {
4710 struct net_device *dev;
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004711 int flags = 0;
4712
Florian Westphal121622d2017-08-15 16:34:42 +02004713 rcu_read_lock();
4714
4715 dev = dev_get_by_index_rcu(net, iif);
Thomas Grafab364a62006-08-22 00:01:47 -07004716 if (!dev) {
Florian Westphal121622d2017-08-15 16:34:42 +02004717 rcu_read_unlock();
Thomas Grafab364a62006-08-22 00:01:47 -07004718 err = -ENODEV;
4719 goto errout;
4720 }
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004721
4722 fl6.flowi6_iif = iif;
4723
4724 if (!ipv6_addr_any(&fl6.saddr))
4725 flags |= RT6_LOOKUP_F_HAS_SADDR;
4726
David Ahernb75cc8f2018-03-02 08:32:17 -08004727 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
Florian Westphal121622d2017-08-15 16:34:42 +02004728
4729 rcu_read_unlock();
Shmulik Ladkani72331bc2012-04-01 04:03:45 +00004730 } else {
4731 fl6.flowi6_oif = oif;
4732
Ido Schimmel58acfd72017-12-20 12:28:25 +02004733 dst = ip6_route_output(net, NULL, &fl6);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004734 }
4735
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004736
4737 rt = container_of(dst, struct rt6_info, dst);
4738 if (rt->dst.error) {
4739 err = rt->dst.error;
4740 ip6_rt_put(rt);
4741 goto errout;
Thomas Grafab364a62006-08-22 00:01:47 -07004742 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004743
WANG Cong9d6acb32017-03-01 20:48:39 -08004744 if (rt == net->ipv6.ip6_null_entry) {
4745 err = rt->dst.error;
4746 ip6_rt_put(rt);
4747 goto errout;
4748 }
4749
David S. Millerfba961a2017-12-22 11:16:31 -05004750 if (fibmatch && rt->from) {
4751 struct rt6_info *ort = rt->from;
Ido Schimmel58acfd72017-12-20 12:28:25 +02004752
4753 dst_hold(&ort->dst);
4754 ip6_rt_put(rt);
4755 rt = ort;
4756 }
4757
Linus Torvalds1da177e2005-04-16 15:20:36 -07004758 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
David S. Miller38308472011-12-03 18:02:47 -05004759 if (!skb) {
Amerigo Wang94e187c2012-10-29 00:13:19 +00004760 ip6_rt_put(rt);
Thomas Grafab364a62006-08-22 00:01:47 -07004761 err = -ENOBUFS;
4762 goto errout;
4763 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004764
Changli Gaod8d1f302010-06-10 23:31:35 -07004765 skb_dst_set(skb, &rt->dst);
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004766 if (fibmatch)
David Ahernd4ead6b2018-04-17 17:33:16 -07004767 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, iif,
Roopa Prabhu18c3a612017-05-25 10:42:40 -07004768 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4769 nlh->nlmsg_seq, 0);
4770 else
David Ahernd4ead6b2018-04-17 17:33:16 -07004771 err = rt6_fill_node(net, skb, rt, dst, &fl6.daddr, &fl6.saddr,
4772 iif, RTM_NEWROUTE,
4773 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
4774 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004775 if (err < 0) {
Thomas Grafab364a62006-08-22 00:01:47 -07004776 kfree_skb(skb);
4777 goto errout;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004778 }
4779
Eric W. Biederman15e47302012-09-07 20:12:54 +00004780 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
Thomas Grafab364a62006-08-22 00:01:47 -07004781errout:
Linus Torvalds1da177e2005-04-16 15:20:36 -07004782 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004783}
4784
Roopa Prabhu37a1d362015-09-13 10:18:33 -07004785void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
4786 unsigned int nlm_flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004787{
4788 struct sk_buff *skb;
Daniel Lezcano55786892008-03-04 13:47:47 -08004789 struct net *net = info->nl_net;
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08004790 u32 seq;
4791 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004792
Denis V. Lunev528c4ce2007-12-13 09:45:12 -08004793 err = -ENOBUFS;
David S. Miller38308472011-12-03 18:02:47 -05004794 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
Thomas Graf86872cb2006-08-22 00:01:08 -07004795
Roopa Prabhu19e42e42015-07-21 10:43:48 +02004796 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
David S. Miller38308472011-12-03 18:02:47 -05004797 if (!skb)
Thomas Graf21713eb2006-08-15 00:35:24 -07004798 goto errout;
4799
David Ahernd4ead6b2018-04-17 17:33:16 -07004800 err = rt6_fill_node(net, skb, rt, NULL, NULL, NULL, 0,
4801 event, info->portid, seq, nlm_flags);
Patrick McHardy26932562007-01-31 23:16:40 -08004802 if (err < 0) {
4803 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4804 WARN_ON(err == -EMSGSIZE);
4805 kfree_skb(skb);
4806 goto errout;
4807 }
Eric W. Biederman15e47302012-09-07 20:12:54 +00004808 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
Pablo Neira Ayuso1ce85fe2009-02-24 23:18:28 -08004809 info->nlh, gfp_any());
4810 return;
Thomas Graf21713eb2006-08-15 00:35:24 -07004811errout:
4812 if (err < 0)
Daniel Lezcano55786892008-03-04 13:47:47 -08004813 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004814}
4815
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004816static int ip6_route_dev_notify(struct notifier_block *this,
Jiri Pirko351638e2013-05-28 01:30:21 +00004817 unsigned long event, void *ptr)
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004818{
Jiri Pirko351638e2013-05-28 01:30:21 +00004819 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
YOSHIFUJI Hideakic346dca2008-03-25 21:47:49 +09004820 struct net *net = dev_net(dev);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004821
WANG Cong242d3a42017-05-08 10:12:13 -07004822 if (!(dev->flags & IFF_LOOPBACK))
4823 return NOTIFY_OK;
4824
4825 if (event == NETDEV_REGISTER) {
David Ahern421842e2018-04-17 17:33:18 -07004826 net->ipv6.fib6_null_entry->fib6_nh.nh_dev = dev;
4827 net->ipv6.fib6_null_entry->rt6i_idev = in6_dev_get(dev);
Changli Gaod8d1f302010-06-10 23:31:35 -07004828 net->ipv6.ip6_null_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004829 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4830#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Changli Gaod8d1f302010-06-10 23:31:35 -07004831 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004832 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
Changli Gaod8d1f302010-06-10 23:31:35 -07004833 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004834 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4835#endif
WANG Cong76da0702017-06-20 11:42:27 -07004836 } else if (event == NETDEV_UNREGISTER &&
4837 dev->reg_state != NETREG_UNREGISTERED) {
4838 /* NETDEV_UNREGISTER could be fired for multiple times by
4839 * netdev_wait_allrefs(). Make sure we only call this once.
4840 */
David Ahern421842e2018-04-17 17:33:18 -07004841 in6_dev_put_clear(&net->ipv6.fib6_null_entry->rt6i_idev);
Eric Dumazet12d94a82017-08-15 04:09:51 -07004842 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07004843#ifdef CONFIG_IPV6_MULTIPLE_TABLES
Eric Dumazet12d94a82017-08-15 04:09:51 -07004844 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4845 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
WANG Cong242d3a42017-05-08 10:12:13 -07004846#endif
Daniel Lezcano8ed67782008-03-04 13:48:30 -08004847 }
4848
4849 return NOTIFY_OK;
4850}
4851
Linus Torvalds1da177e2005-04-16 15:20:36 -07004852/*
4853 * /proc
4854 */
4855
4856#ifdef CONFIG_PROC_FS
4857
/* File operations for the /proc/net/ipv6_route seq_file
 * (registered in ip6_route_net_init_late()).
 */
static const struct file_operations ipv6_route_proc_fops = {
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
4864
/* Emit one line of FIB/route counters for /proc/net/rt6_stats.
 * All seven fields are printed as zero-padded hex; the sixth field is
 * the current dst entry count of this netns' ip6_dst_ops.
 */
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}
4879
/* open() hook for /proc/net/rt6_stats: single-shot, netns-aware show. */
static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}
4884
/* File operations for /proc/net/rt6_stats
 * (registered in ip6_route_net_init_late()).
 */
static const struct file_operations rt6_stats_seq_fops = {
	.open	 = rt6_stats_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release_net,
};
4891#endif /* CONFIG_PROC_FS */
4892
4893#ifdef CONFIG_SYSCTL
4894
Linus Torvalds1da177e2005-04-16 15:20:36 -07004895static
Joe Perchesfe2c6332013-06-11 23:04:25 -07004896int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
Linus Torvalds1da177e2005-04-16 15:20:36 -07004897 void __user *buffer, size_t *lenp, loff_t *ppos)
4898{
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00004899 struct net *net;
4900 int delay;
4901 if (!write)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004902 return -EINVAL;
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00004903
4904 net = (struct net *)ctl->extra1;
4905 delay = net->ipv6.sysctl.flush_delay;
4906 proc_dointvec(ctl, write, buffer, lenp, ppos);
Michal Kubeček2ac3ac82013-08-01 10:04:14 +02004907 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
Lucian Adrian Grijincuc486da32011-02-24 19:48:03 +00004908 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004909}
4910
/* Template for the net.ipv6.route.* sysctl table.  It is kmemdup'd and
 * rewired per netns by ipv6_route_sysctl_init(); the .data pointers
 * below (pointing into init_net / the dst_ops template) are only
 * placeholders and are patched by table index there, so the entry
 * order must not change.  Note "gc_min_interval_ms" deliberately
 * shares its backing variable with "gc_min_interval", exposing the
 * same knob in milliseconds.
 */
struct ctl_table ipv6_route_table_template[] = {
	{
		/* write-only trigger, see ipv6_sysctl_rtcache_flush() */
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		/* same variable as "gc_min_interval", in milliseconds */
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{ }
};
4984
/* Clone ipv6_route_table_template for a netns and point each entry's
 * .data at that netns' own variables.  The table[N] indices must match
 * the template's entry order exactly.  Returns the new table, or NULL
 * on allocation failure (caller owns and eventually kfree()s it).
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		/* extra1 carries the owning netns to the flush handler */
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07005013#endif
5014
/* Per-netns init for the IPv6 routing layer: clone the dst_ops
 * template, allocate the always-present special route entries
 * (fib6_null/ip6_null, plus prohibit/blackhole when multiple tables
 * are configured) and seed the sysctl defaults.  On failure the goto
 * chain unwinds exactly what was already allocated.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.fib6_null_entry = kmemdup(&fib6_null_entry_template,
					    sizeof(*net->ipv6.fib6_null_entry),
					    GFP_KERNEL);
	if (!net->ipv6.fib6_null_entry)
		goto out_ip6_dst_entries;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_fib6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	/* Sysctl defaults; writable later via net.ipv6.route.* */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

	/* Error unwind: labels free in reverse order of allocation. */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_fib6_null_entry:
	kfree(net->ipv6.fib6_null_entry);
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
5089
Alexey Dobriyan2c8c1e72010-01-17 03:35:32 +00005090static void __net_exit ip6_route_net_exit(struct net *net)
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005091{
David Ahern421842e2018-04-17 17:33:18 -07005092 kfree(net->ipv6.fib6_null_entry);
Daniel Lezcano8ed67782008-03-04 13:48:30 -08005093 kfree(net->ipv6.ip6_null_entry);
5094#ifdef CONFIG_IPV6_MULTIPLE_TABLES
5095 kfree(net->ipv6.ip6_prohibit_entry);
5096 kfree(net->ipv6.ip6_blk_hole_entry);
5097#endif
Xiaotian Feng41bb78b2010-11-02 16:11:05 +00005098 dst_entries_destroy(&net->ipv6.ip6_dst_ops);
Daniel Lezcanocdb18762008-03-04 13:45:33 -08005099}
5100
/* Late per-netns init: create the /proc/net/ipv6_route and
 * /proc/net/rt6_stats entries.  Runs after the main routing state is
 * set up; always succeeds (proc_create failures are ignored here).
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", 0444, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}
5109
/* Late per-netns teardown: remove the proc entries created by
 * ip6_route_net_init_late().
 */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
5117
/* pernet ops for the core routing state (special entries, sysctls). */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
5122
David S. Millerc3426b42012-06-09 16:27:05 -07005123static int __net_init ipv6_inetpeer_init(struct net *net)
5124{
5125 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
5126
5127 if (!bp)
5128 return -ENOMEM;
5129 inet_peer_base_init(bp);
5130 net->ipv6.peers = bp;
5131 return 0;
5132}
5133
/* Per-netns teardown of the inetpeer storage.  The pointer is cleared
 * before the tree is invalidated and the base freed.
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}
5142
/* pernet ops for the per-netns inetpeer base. */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};
5147
/* pernet ops for the late stage (proc entries). */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};
5152
/* Netdevice notifier binding ip6_route_dev_notify(); the priority is
 * expressed relative to ADDRCONF_NOTIFY_PRIORITY to order it against
 * addrconf's notifier.
 */
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
5157
/* Bind init_net's special route entries to its loopback device and
 * take the idev references by hand; for other netns this is done in
 * ip6_route_dev_notify() on NETDEV_REGISTER.
 */
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.fib6_null_entry->fib6_nh.nh_dev = init_net.loopback_dev;
	init_net.ipv6.fib6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #endif
}
5174
/* Boot-time init of the IPv6 routing subsystem: dst slab cache,
 * blackhole dst counters, pernet subsystems, fib6/xfrm6/fib6-rules,
 * rtnetlink route handlers, the loopback notifier and the per-cpu
 * uncached route lists.  On any failure the goto chain unwinds every
 * step already completed, in reverse order.  Returns 0 or -errno.
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* blackhole dsts share the regular rt6_info slab cache */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	/* RTM_NEW/DEL/GETROUTE handlers; all three unwind through
	 * rtnl_unregister_all() on failure.
	 */
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
5266
/* Module-exit teardown: undo everything ip6_route_init() set up, in
 * reverse order of initialization.
 */
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}