/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>
#include <net/netns/generic.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */

static unsigned int vrf_net_id;

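/* Per-VRF device state: the preallocated IPv4/IPv6 dsts used to steer
 * locally generated traffic back through this device, the IPv6 FIB table
 * associated with the VRF, and the routing table id the VRF owns.
 */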
struct net_vrf {
	struct rtable __rcu	*rth;
	struct rt6_info	__rcu	*rt6;
#if IS_ENABLED(CONFIG_IPV6)
	struct fib6_table	*fib6_table;
#endif
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

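/* Fold the per-CPU counters into one rtnl_link_stats64. The
 * u64_stats_fetch_begin_irq/retry pair re-reads a CPU's counters if an
 * update raced with the read, so the 64-bit values stay consistent even
 * on 32-bit hosts.
 */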
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}

/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (dst->dev == dev)
		return vrf_local_xmit(skb, dev, dst);

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path
	 */
	if (rt->dst.dev == vrf_dev)
		return vrf_local_xmit(skb, vrf_dev, &rt->dst);

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

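/* Transmit demux: only IPv4 and IPv6 frames are routed through the VRF;
 * anything else is counted as a tx error and dropped.
 */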
static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

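/* ndo_start_xmit handler. The length is sampled before the frame is
 * handed off because the skb may already have been freed by the time
 * the per-CPU tx counters are updated.
 */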
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

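/* Used on the "direct" output path (no qdisc on the VRF device). If
 * packet taps are attached to the VRF device, a temporary Ethernet
 * header (VRF MAC as source, zeroed destination) is pushed so the taps
 * see a well-formed frame, then removed again before the packet
 * continues down the real output path.
 */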
static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb, false);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}

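/* IPv6 half of l3mdev_l3_out: link-scoped destinations are left alone;
 * otherwise the packet either takes the lightweight direct path (default,
 * qdisc-less VRF device) or is redirected through the preallocated VRF
 * dst so it traverses the VRF device's xmit path.
 */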
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct rt6_info *rt6;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	vrf->fib6_table = fib6_new_table(net, vrf->tb_id);
	if (!vrf->fib6_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	rt6->dst.output	= vrf_output6;

	rcu_assign_pointer(vrf->rt6, rt6);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	bool is_v6gw = false;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	neigh = ip_neigh_for_gw(rt, skb, &is_v6gw);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		/* if crossing protocols, can not use the cached header */
		ret = neigh_output(neigh, skb, is_v6gw);
		rcu_read_unlock_bh();
		return ret;
	}

	rcu_read_unlock_bh();
err:
	vrf_tx_error(skb->dev, skb);
	return ret;
}

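/* Output handler installed on the VRF's preallocated IPv4 dst (vrf->rth):
 * account the packet, then run the POST_ROUTING netfilter hook before
 * vrf_finish_output resolves the neighbour and transmits.
 */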
static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}

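/* IPv4 half of l3mdev_l3_out; mirrors vrf_ip6_out, but additionally
 * leaves multicast and local broadcast traffic undiverted.
 */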
static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast or local broadcast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr) ||
	    ipv4_is_lbcast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	rth->dst.output	= vrf_output;

	rcu_assign_pointer(vrf->rth, rth);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev,
			 struct netlink_ext_ack *extack)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP, extack);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags, extack);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

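/* Enslave port_dev to the VRF: mark it as an L3 slave, link it below the
 * VRF master, and cycle it down/up so its addresses and routes move into
 * the VRF's table.
 */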
static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			    struct netlink_ext_ack *extack)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave loopback device to a VRF");
		return -EOPNOTSUPP;
	}

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL, extack);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev, extack);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev,
			 struct netlink_ext_ack *extack)
{
	if (netif_is_l3_master(port_dev)) {
		NL_SET_ERR_MSG(extack,
			       "Can not enslave an L3 master device to a VRF");
		return -EINVAL;
	}

	if (netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev, extack);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev, NULL);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

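/* ndo_init: allocate the per-CPU stats and pre-create the IPv4 and IPv6
 * dsts that point back at this device; these are what
 * vrf_ip_out_redirect() and vrf_ip6_out_redirect() hand out.
 */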
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     const struct sk_buff *skb,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return ip6_pol_route(net, vrf->fib6_table, ifindex, fl6, skb, flags);
}

static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.flowi6_iif     = ifindex,
		.flowi6_mark    = skb->mark,
		.flowi6_proto   = iph->nexthdr,
		.daddr          = iph->daddr,
		.saddr          = iph->saddr,
		.flowlabel      = ip6_flowinfo(iph),
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex, skb,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

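/* IPv6 receive path: make the VRF device the skb's device so VRF-bound
 * sockets and taps see the packet. Loopback and non-ND link-local traffic
 * is only re-homed and not pushed through the taps again; NDISC frames
 * keep their original ingress device; strict (link-local/multicast)
 * destinations get a dst looked up in the VRF's FIB table.
 */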
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	bool is_ndisc = ipv6_ndisc_frame(skb);

	/* loopback, multicast & non-ND link-local traffic; do not push through
	 * packet taps again. Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK || (need_strict && !is_ndisc)) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		if (skb->pkt_type == PACKET_LOOPBACK)
			skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC then keep the ingress interface */
	if (!is_ndisc) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

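/* IPv4 receive path counterpart of vrf_ip6_rcv: re-home the skb to the
 * VRF device; multicast and looped-back packets bypass the taps and the
 * netfilter hook, everything else is counted, handed to taps attached to
 * the VRF device and run through PRE_ROUTING.
 */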
static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, NULL, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz  = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */
	sz += nla_total_size(sizeof(u8));	/* FRA_PROTOCOL */

	return sz;
}

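/* Add or remove the l3mdev FIB rule by building a fib_rule netlink
 * message inside the kernel and feeding it straight to fib_nl_newrule()
 * or fib_nl_delrule(). The installed rule is what "ip rule" reports,
 * roughly, as "1000: from all lookup [l3mdev-table]".
 */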
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u8(skb, FRA_PROTOCOL, RTPROT_KERNEL))
		goto nla_put_failure;

	if (nla_put_u8(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET,  true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IP6MR, true);
	if (err < 0)
		goto ip6mr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IPV6_MROUTE_MULTIPLE_TABLES)
ip6mr_err:
	vrf_fib_rule(dev, RTNL_FAMILY_IPMR,  false);
#endif

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6,  false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET,  false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

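/* rtnl_link_ops setup: an ethernet-like device with a random MAC, no
 * qdisc and no rx handler by default, pinned to its network namespace,
 * with MTU bounds wide enough that the IPv4/IPv6 stacks stay enabled.
 */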
static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features   |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features   |= NETIF_F_GSO_SOFTWARE;
	dev->features   |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM | NETIF_F_SCTP_CRC;
	dev->features   |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
	dev->priv_flags |= IFF_NO_RX_HANDLER;
	dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* VRF devices do not care about MTU, but if the MTU is set
	 * too low then the ipv4 and ipv6 protocols are disabled
	 * which breaks networking.
	 */
	dev->min_mtu = IPV6_MIN_MTU;
	dev->max_mtu = ETH_MAX_MTU;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[],
			struct netlink_ext_ack *extack)
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EINVAL;
		}
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			NL_SET_ERR_MSG(extack, "Invalid hardware address");
			return -EADDRNOTAVAIL;
		}
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	struct net_device *port_dev;
	struct list_head *iter;

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	unregister_netdevice_queue(dev, head);
}

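/* Handler for creating a VRF device, e.g. (iproute2 usage, shown for
 * illustration only):
 *
 *   ip link add vrf-blue type vrf table 10
 *   ip link set dev vrf-blue up
 *   ip link set dev eth1 master vrf-blue
 *
 * The table id attribute is mandatory; the l3mdev FIB rules are installed
 * the first time a VRF is created in a network namespace.
 */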
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[],
		       struct netlink_ext_ack *extack)
{
	struct net_vrf *vrf = netdev_priv(dev);
	bool *add_fib_rules;
	struct net *net;
	int err;

	if (!data || !data[IFLA_VRF_TABLE]) {
		NL_SET_ERR_MSG(extack, "VRF table id is missing");
		return -EINVAL;
	}

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC) {
		NL_SET_ERR_MSG_ATTR(extack, data[IFLA_VRF_TABLE],
				    "Invalid VRF table id");
		return -EINVAL;
	}

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	net = dev_net(dev);
	add_fib_rules = net_generic(net, vrf_net_id);
	if (*add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		*add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32)); /* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

/* Initialize per network namespace state */
static int __net_init vrf_netns_init(struct net *net)
{
	bool *add_fib_rules = net_generic(net, vrf_net_id);

	*add_fib_rules = true;

	return 0;
}

static struct pernet_operations vrf_net_ops __net_initdata = {
	.init = vrf_netns_init,
	.id   = &vrf_net_id,
	.size = sizeof(bool),
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = register_pernet_subsys(&vrf_net_ops);
	if (rc < 0)
		goto error;

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0) {
		unregister_pernet_subsys(&vrf_net_ops);
		goto error;
	}

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);