/*
 * vrf.c: device driver to encapsulate a VRF space
 *
 * Copyright (c) 2015 Cumulus Networks. All rights reserved.
 * Copyright (c) 2015 Shrijeet Mukherjee <shm@cumulusnetworks.com>
 * Copyright (c) 2015 David Ahern <dsa@cumulusnetworks.com>
 *
 * Based on dummy, team and ipvlan drivers
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <net/rtnetlink.h>
#include <linux/u64_stats_sync.h>
#include <linux/hashtable.h>

#include <linux/inetdevice.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/ip_fib.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/route.h>
#include <net/addrconf.h>
#include <net/l3mdev.h>
#include <net/fib_rules.h>

#define DRV_NAME	"vrf"
#define DRV_VERSION	"1.0"

#define FIB_RULE_PREF	1000	/* default preference for FIB rules */
static bool add_fib_rules = true;

struct net_vrf {
	struct rtable __rcu	*rth;
	struct rtable __rcu	*rth_local;
	struct rt6_info	__rcu	*rt6;
	struct rt6_info	__rcu	*rt6_local;
	u32			tb_id;
};

struct pcpu_dstats {
	u64			tx_pkts;
	u64			tx_bytes;
	u64			tx_drps;
	u64			rx_pkts;
	u64			rx_bytes;
	u64			rx_drps;
	struct u64_stats_sync	syncp;
};

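/* bump the per-CPU Rx counters for a packet received through the VRF device */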
static void vrf_rx_stats(struct net_device *dev, int len)
{
	struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

	u64_stats_update_begin(&dstats->syncp);
	dstats->rx_pkts++;
	dstats->rx_bytes += len;
	u64_stats_update_end(&dstats->syncp);
}

static void vrf_tx_error(struct net_device *vrf_dev, struct sk_buff *skb)
{
	vrf_dev->stats.tx_errors++;
	kfree_skb(skb);
}

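/* fold the per-CPU counters into the rtnl_link_stats64 reply */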
static void vrf_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	int i;

	for_each_possible_cpu(i) {
		const struct pcpu_dstats *dstats;
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		unsigned int start;

		dstats = per_cpu_ptr(dev->dstats, i);
		do {
			start = u64_stats_fetch_begin_irq(&dstats->syncp);
			tbytes = dstats->tx_bytes;
			tpkts = dstats->tx_pkts;
			tdrops = dstats->tx_drps;
			rbytes = dstats->rx_bytes;
			rpkts = dstats->rx_pkts;
		} while (u64_stats_fetch_retry_irq(&dstats->syncp, start));
		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}
}

/* by default VRF devices do not have a qdisc and are expected
 * to be created with only a single queue.
 */
static bool qdisc_tx_is_default(const struct net_device *dev)
{
	struct netdev_queue *txq;
	struct Qdisc *qdisc;

	if (dev->num_tx_queues > 1)
		return false;

	txq = netdev_get_tx_queue(dev, 0);
	qdisc = rcu_access_pointer(txq->qdisc);

	return !qdisc->enqueue;
}

/* Local traffic destined to local address. Reinsert the packet to rx
 * path, similar to loopback handling.
 */
static int vrf_local_xmit(struct sk_buff *skb, struct net_device *dev,
			  struct dst_entry *dst)
{
	int len = skb->len;

	skb_orphan(skb);

	skb_dst_set(skb, dst);
	skb_dst_force(skb);

	/* set pkt_type to avoid skb hitting packet taps twice -
	 * once on Tx and again in Rx processing
	 */
	skb->pkt_type = PACKET_LOOPBACK;

	skb->protocol = eth_type_trans(skb, dev);

	if (likely(netif_rx(skb) == NET_RX_SUCCESS))
		vrf_rx_stats(dev, len);
	else
		this_cpu_inc(dev->dstats->rx_drps);

	return NETDEV_TX_OK;
}

#if IS_ENABLED(CONFIG_IPV6)
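/* local IPv6 output: run the NF_INET_LOCAL_OUT hook, then hand the
 * packet to dst_output() if the hook let it pass
 */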
static int vrf_ip6_local_out(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net,
		      sk, skb, NULL, skb_dst(skb)->dev, dst_output);

	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

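/* IPv6 Tx path: route the packet in the VRF FIB; traffic that resolves
 * to a local address is short-circuited to the Rx path via the cached
 * local dst
 */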
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	struct flowi6 fl6 = {
		/* needed to match OIF rule */
		.flowi6_oif = dev->ifindex,
		.flowi6_iif = LOOPBACK_IFINDEX,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
		.flowi6_flags = FLOWI_FLAG_SKIP_NH_OIF,
	};
	int ret = NET_XMIT_DROP;
	struct dst_entry *dst;
	struct dst_entry *dst_null = &net->ipv6.ip6_null_entry->dst;

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == dst_null)
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (dst->dev == net->loopback_dev || dst->dev == dev) {
		struct net_vrf *vrf = netdev_priv(dev);
		struct rt6_info *rt6_local;

		/* release looked up dst and use cached local dst */
		dst_release(dst);

		rcu_read_lock();

		rt6_local = rcu_dereference(vrf->rt6_local);
		if (unlikely(!rt6_local)) {
			rcu_read_unlock();
			goto err;
		}

		/* Ordering issue: cached local dst is created on newlink
		 * before the IPv6 initialization. Using the local dst
		 * requires rt6i_idev to be set so make sure it is.
		 */
		if (unlikely(!rt6_local->rt6i_idev)) {
			rt6_local->rt6i_idev = in6_dev_get(dev);
			if (!rt6_local->rt6i_idev) {
				rcu_read_unlock();
				goto err;
			}
		}

		dst = &rt6_local->dst;
		dst_hold(dst);

		rcu_read_unlock();

		return vrf_local_xmit(skb, dev, &rt6_local->dst);
	}

	skb_dst_set(skb, dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	ret = vrf_ip6_local_out(net, skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

	return ret;
err:
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#else
static netdev_tx_t vrf_process_v6_outbound(struct sk_buff *skb,
					   struct net_device *dev)
{
	vrf_tx_error(dev, skb);
	return NET_XMIT_DROP;
}
#endif

/* based on ip_local_out; can't use it b/c the dst is switched pointing to us */
static int vrf_ip_local_out(struct net *net, struct sock *sk,
			    struct sk_buff *skb)
{
	int err;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, skb_dst(skb)->dev, dst_output);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

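/* IPv4 Tx path: same structure as the IPv6 path above - route lookup in
 * the VRF table, with locally destined packets diverted to the Rx path
 */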
static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
					   struct net_device *vrf_dev)
{
	struct iphdr *ip4h = ip_hdr(skb);
	int ret = NET_XMIT_DROP;
	struct flowi4 fl4 = {
		/* needed to match OIF rule */
		.flowi4_oif = vrf_dev->ifindex,
		.flowi4_iif = LOOPBACK_IFINDEX,
		.flowi4_tos = RT_TOS(ip4h->tos),
		.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
		.flowi4_proto = ip4h->protocol,
		.daddr = ip4h->daddr,
		.saddr = ip4h->saddr,
	};
	struct net *net = dev_net(vrf_dev);
	struct rtable *rt;

	rt = ip_route_output_flow(net, &fl4, NULL);
	if (IS_ERR(rt))
		goto err;

	skb_dst_drop(skb);

	/* if dst.dev is loopback or the VRF device again this is locally
	 * originated traffic destined to a local address. Short circuit
	 * to Rx path using our local dst
	 */
	if (rt->dst.dev == net->loopback_dev || rt->dst.dev == vrf_dev) {
		struct net_vrf *vrf = netdev_priv(vrf_dev);
		struct rtable *rth_local;
		struct dst_entry *dst = NULL;

		ip_rt_put(rt);

		rcu_read_lock();

		rth_local = rcu_dereference(vrf->rth_local);
		if (likely(rth_local)) {
			dst = &rth_local->dst;
			dst_hold(dst);
		}

		rcu_read_unlock();

		if (unlikely(!dst))
			goto err;

		return vrf_local_xmit(skb, vrf_dev, dst);
	}

	skb_dst_set(skb, &rt->dst);

	/* strip the ethernet header added for pass through VRF device */
	__skb_pull(skb, skb_network_offset(skb));

	if (!ip4h->saddr) {
		ip4h->saddr = inet_select_addr(skb_dst(skb)->dev, 0,
					       RT_SCOPE_LINK);
	}

	ret = vrf_ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	if (unlikely(net_xmit_eval(ret)))
		vrf_dev->stats.tx_errors++;
	else
		ret = NET_XMIT_SUCCESS;

out:
	return ret;
err:
	vrf_tx_error(vrf_dev, skb);
	goto out;
}

static netdev_tx_t is_ip_tx_frame(struct sk_buff *skb, struct net_device *dev)
{
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		return vrf_process_v4_outbound(skb, dev);
	case htons(ETH_P_IPV6):
		return vrf_process_v6_outbound(skb, dev);
	default:
		vrf_tx_error(dev, skb);
		return NET_XMIT_DROP;
	}
}

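/* ndo_start_xmit handler: dispatch on L3 protocol and update Tx stats */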
static netdev_tx_t vrf_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int len = skb->len;
	netdev_tx_t ret = is_ip_tx_frame(skb, dev);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct pcpu_dstats *dstats = this_cpu_ptr(dev->dstats);

		u64_stats_update_begin(&dstats->syncp);
		dstats->tx_pkts++;
		dstats->tx_bytes += len;
		u64_stats_update_end(&dstats->syncp);
	} else {
		this_cpu_inc(dev->dstats->tx_drps);
	}

	return ret;
}

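/* give packet taps (e.g. packet sockets) bound to the VRF device a copy
 * of the outgoing packet; a dummy ethernet header is pushed for the taps
 * and removed again afterwards
 */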
static int vrf_finish_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	struct net_device *vrf_dev = skb->dev;

	if (!list_empty(&vrf_dev->ptype_all) &&
	    likely(skb_headroom(skb) >= ETH_HLEN)) {
		struct ethhdr *eth = (struct ethhdr *)skb_push(skb, ETH_HLEN);

		ether_addr_copy(eth->h_source, vrf_dev->dev_addr);
		eth_zero_addr(eth->h_dest);
		eth->h_proto = skb->protocol;

		rcu_read_lock_bh();
		dev_queue_xmit_nit(skb, vrf_dev);
		rcu_read_unlock_bh();

		skb_pull(skb, ETH_HLEN);
	}

	return 1;
}

#if IS_ENABLED(CONFIG_IPV6)
/* modelled after ip6_finish_output2 */
static int vrf_finish_output6(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;
	struct neighbour *neigh;
	struct in6_addr *nexthop;
	int ret;

	nf_reset(skb);

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	rcu_read_lock_bh();
	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
		rcu_read_unlock_bh();
		return ret;
	}
	rcu_read_unlock_bh();

	IP6_INC_STATS(dev_net(dst->dev),
		      ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}

/* modelled after ip6_output */
static int vrf_output6(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb_dst(skb)->dev,
			    vrf_finish_output6,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip6_out_redirect(struct net_device *vrf_dev,
					    struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6)) {
		dst = &rt6->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output6_direct(struct net *net, struct sock *sk,
			      struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IPV6);

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip6_out_direct(struct net_device *vrf_dev,
					  struct sock *sk,
					  struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output6_direct);

	if (likely(err == 1))
		err = vrf_output6_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}

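/* l3mdev IPv6 output hook: leave link scope packets alone, otherwise use
 * the direct path when the device has no qdisc, else redirect through
 * the VRF device
 */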
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	/* don't divert link scope packets */
	if (rt6_need_strict(&ipv6_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip6_out_direct(vrf_dev, sk, skb);

	return vrf_ip6_out_redirect(vrf_dev, skb);
}

/* holding rtnl */
static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rt6_info *rt6 = rtnl_dereference(vrf->rt6);
	struct rt6_info *rt6_local = rtnl_dereference(vrf->rt6_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rt6, NULL);
	RCU_INIT_POINTER(vrf->rt6_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rt6) {
		dst = &rt6->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rt6_local) {
		if (rt6_local->rt6i_idev) {
			in6_dev_put(rt6_local->rt6i_idev);
			rt6_local->rt6i_idev = NULL;
		}

		dst = &rt6_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

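/* allocate the IPv6 dsts used for VRF output and local Rx and bind them
 * to the VRF FIB table
 */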
static int vrf_rt6_create(struct net_device *dev)
{
	int flags = DST_HOST | DST_NOPOLICY | DST_NOXFRM | DST_NOCACHE;
	struct net_vrf *vrf = netdev_priv(dev);
	struct net *net = dev_net(dev);
	struct fib6_table *rt6i_table;
	struct rt6_info *rt6, *rt6_local;
	int rc = -ENOMEM;

	/* IPv6 can be CONFIG enabled and then disabled runtime */
	if (!ipv6_mod_enabled())
		return 0;

	rt6i_table = fib6_new_table(net, vrf->tb_id);
	if (!rt6i_table)
		goto out;

	/* create a dst for routing packets out a VRF device */
	rt6 = ip6_dst_alloc(net, dev, flags);
	if (!rt6)
		goto out;

	dst_hold(&rt6->dst);

	rt6->rt6i_table = rt6i_table;
	rt6->dst.output = vrf_output6;

	/* create a dst for local routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rt6_local = ip6_dst_alloc(net, dev, flags);
	if (!rt6_local) {
		dst_release(&rt6->dst);
		goto out;
	}

	dst_hold(&rt6_local->dst);

	rt6_local->rt6i_idev  = in6_dev_get(dev);
	rt6_local->rt6i_flags = RTF_UP | RTF_NONEXTHOP | RTF_LOCAL;
	rt6_local->rt6i_table = rt6i_table;
	rt6_local->dst.input  = ip6_input;

	rcu_assign_pointer(vrf->rt6, rt6);
	rcu_assign_pointer(vrf->rt6_local, rt6_local);

	rc = 0;
out:
	return rc;
}
#else
static struct sk_buff *vrf_ip6_out(struct net_device *vrf_dev,
				   struct sock *sk,
				   struct sk_buff *skb)
{
	return skb;
}

static void vrf_rt6_release(struct net_device *dev, struct net_vrf *vrf)
{
}

static int vrf_rt6_create(struct net_device *dev)
{
	return 0;
}
#endif

/* modelled after ip_finish_output2 */
static int vrf_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;
	int ret = -EINVAL;

	nf_reset(skb);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			ret = -ENOMEM;
			goto err;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		consume_skb(skb);
		skb = skb2;
	}

	rcu_read_lock_bh();

	nexthop = (__force u32)rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		sock_confirm_neigh(skb, neigh);
		ret = neigh_output(neigh, skb);
	}

	rcu_read_unlock_bh();
err:
	if (unlikely(ret < 0))
		vrf_tx_error(skb->dev, skb);
	return ret;
}

static int vrf_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    vrf_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

/* set dst on skb to send packet to us via dev_xmit path. Allows
 * packet to go through device based features such as qdisc, netfilter
 * hooks and packet sockets with skb->dev set to vrf device.
 */
static struct sk_buff *vrf_ip_out_redirect(struct net_device *vrf_dev,
					   struct sk_buff *skb)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);
	struct dst_entry *dst = NULL;
	struct rtable *rth;

	rcu_read_lock();

	rth = rcu_dereference(vrf->rth);
	if (likely(rth)) {
		dst = &rth->dst;
		dst_hold(dst);
	}

	rcu_read_unlock();

	if (unlikely(!dst)) {
		vrf_tx_error(vrf_dev, skb);
		return NULL;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, dst);

	return skb;
}

static int vrf_output_direct(struct net *net, struct sock *sk,
			     struct sk_buff *skb)
{
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    vrf_finish_direct,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

static struct sk_buff *vrf_ip_out_direct(struct net_device *vrf_dev,
					 struct sock *sk,
					 struct sk_buff *skb)
{
	struct net *net = dev_net(vrf_dev);
	int err;

	skb->dev = vrf_dev;

	err = nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, net, sk,
		      skb, NULL, vrf_dev, vrf_output_direct);

	if (likely(err == 1))
		err = vrf_output_direct(net, sk, skb);

	/* reset skb device */
	if (likely(err == 1))
		nf_reset(skb);
	else
		skb = NULL;

	return skb;
}

static struct sk_buff *vrf_ip_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb)
{
	/* don't divert multicast */
	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		return skb;

	if (qdisc_tx_is_default(vrf_dev))
		return vrf_ip_out_direct(vrf_dev, sk, skb);

	return vrf_ip_out_redirect(vrf_dev, skb);
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_out(struct net_device *vrf_dev,
				  struct sock *sk,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_out(vrf_dev, sk, skb);
	case AF_INET6:
		return vrf_ip6_out(vrf_dev, sk, skb);
	}

	return skb;
}

/* holding rtnl */
static void vrf_rtable_release(struct net_device *dev, struct net_vrf *vrf)
{
	struct rtable *rth = rtnl_dereference(vrf->rth);
	struct rtable *rth_local = rtnl_dereference(vrf->rth_local);
	struct net *net = dev_net(dev);
	struct dst_entry *dst;

	RCU_INIT_POINTER(vrf->rth, NULL);
	RCU_INIT_POINTER(vrf->rth_local, NULL);
	synchronize_rcu();

	/* move dev in dst's to loopback so this VRF device can be deleted
	 * - based on dst_ifdown
	 */
	if (rth) {
		dst = &rth->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}

	if (rth_local) {
		dst = &rth_local->dst;
		dev_put(dst->dev);
		dst->dev = net->loopback_dev;
		dev_hold(dst->dev);
		dst_release(dst);
	}
}

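/* IPv4 counterpart of vrf_rt6_create: make sure the FIB table exists and
 * allocate the output and local-input dsts
 */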
static int vrf_rtable_create(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct rtable *rth, *rth_local;

	if (!fib_new_table(dev_net(dev), vrf->tb_id))
		return -ENOMEM;

	/* create a dst for routing packets out through a VRF device */
	rth = rt_dst_alloc(dev, 0, RTN_UNICAST, 1, 1, 0);
	if (!rth)
		return -ENOMEM;

	/* create a dst for local ingress routing - packets sent locally
	 * to local address via the VRF device as a loopback
	 */
	rth_local = rt_dst_alloc(dev, RTCF_LOCAL, RTN_LOCAL, 1, 1, 0);
	if (!rth_local) {
		dst_release(&rth->dst);
		return -ENOMEM;
	}

	rth->dst.output = vrf_output;
	rth->rt_table_id = vrf->tb_id;

	rth_local->rt_table_id = vrf->tb_id;

	rcu_assign_pointer(vrf->rth, rth);
	rcu_assign_pointer(vrf->rth_local, rth_local);

	return 0;
}

/**************************** device handling ********************/

/* cycle interface to flush neighbor cache and move routes across tables */
static void cycle_netdev(struct net_device *dev)
{
	unsigned int flags = dev->flags;
	int ret;

	if (!netif_running(dev))
		return;

	ret = dev_change_flags(dev, flags & ~IFF_UP);
	if (ret >= 0)
		ret = dev_change_flags(dev, flags);

	if (ret < 0) {
		netdev_err(dev,
			   "Failed to cycle device %s; route tables might be wrong!\n",
			   dev->name);
	}
}

static int do_vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	int ret;

	/* do not allow loopback device to be enslaved to a VRF.
	 * The vrf device acts as the loopback for the vrf.
	 */
	if (port_dev == dev_net(dev)->loopback_dev)
		return -EOPNOTSUPP;

	port_dev->priv_flags |= IFF_L3MDEV_SLAVE;
	ret = netdev_master_upper_dev_link(port_dev, dev, NULL, NULL);
	if (ret < 0)
		goto err;

	cycle_netdev(port_dev);

	return 0;

err:
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;
	return ret;
}

static int vrf_add_slave(struct net_device *dev, struct net_device *port_dev)
{
	if (netif_is_l3_master(port_dev) || netif_is_l3_slave(port_dev))
		return -EINVAL;

	return do_vrf_add_slave(dev, port_dev);
}

/* inverse of do_vrf_add_slave */
static int do_vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	netdev_upper_dev_unlink(port_dev, dev);
	port_dev->priv_flags &= ~IFF_L3MDEV_SLAVE;

	cycle_netdev(port_dev);

	return 0;
}

static int vrf_del_slave(struct net_device *dev, struct net_device *port_dev)
{
	return do_vrf_del_slave(dev, port_dev);
}

static void vrf_dev_uninit(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct net_device *port_dev;
	struct list_head *iter;

	vrf_rtable_release(dev, vrf);
	vrf_rt6_release(dev, vrf);

	netdev_for_each_lower_dev(dev, port_dev, iter)
		vrf_del_slave(dev, port_dev);

	free_percpu(dev->dstats);
	dev->dstats = NULL;
}

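/* ndo_init: allocate per-CPU stats and the default dsts that point back
 * at the VRF device
 */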
static int vrf_dev_init(struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
	if (!dev->dstats)
		goto out_nomem;

	/* create the default dst which points back to us */
	if (vrf_rtable_create(dev) != 0)
		goto out_stats;

	if (vrf_rt6_create(dev) != 0)
		goto out_rth;

	dev->flags = IFF_MASTER | IFF_NOARP;

	/* MTU is irrelevant for VRF device; set to 64k similar to lo */
	dev->mtu = 64 * 1024;

	/* similarly, oper state is irrelevant; set to up to avoid confusion */
	dev->operstate = IF_OPER_UP;
	netdev_lockdep_set_classes(dev);
	return 0;

out_rth:
	vrf_rtable_release(dev, vrf);
out_stats:
	free_percpu(dev->dstats);
	dev->dstats = NULL;
out_nomem:
	return -ENOMEM;
}

static const struct net_device_ops vrf_netdev_ops = {
	.ndo_init		= vrf_dev_init,
	.ndo_uninit		= vrf_dev_uninit,
	.ndo_start_xmit		= vrf_xmit,
	.ndo_get_stats64	= vrf_get_stats64,
	.ndo_add_slave		= vrf_add_slave,
	.ndo_del_slave		= vrf_del_slave,
};

static u32 vrf_fib_table(const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return vrf->tb_id;
}

static int vrf_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}

static struct sk_buff *vrf_rcv_nfhook(u8 pf, unsigned int hook,
				      struct sk_buff *skb,
				      struct net_device *dev)
{
	struct net *net = dev_net(dev);

	if (nf_hook(pf, hook, net, NULL, skb, dev, NULL, vrf_rcv_finish) != 1)
		skb = NULL;	/* kfree_skb(skb) handled by nf code */

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* neighbor handling is done with actual device; do not want
 * to flip skb->dev for those ndisc packets. This really fails
 * for multiple next protocols (e.g., NEXTHDR_HOP). But it is
 * a start.
 */
static bool ipv6_ndisc_frame(const struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	bool rc = false;

	if (iph->nexthdr == NEXTHDR_ICMP) {
		const struct icmp6hdr *icmph;
		struct icmp6hdr _icmph;

		icmph = skb_header_pointer(skb, sizeof(*iph),
					   sizeof(_icmph), &_icmph);
		if (!icmph)
			goto out;

		switch (icmph->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
		case NDISC_REDIRECT:
			rc = true;
			break;
		}
	}

out:
	return rc;
}

static struct rt6_info *vrf_ip6_route_lookup(struct net *net,
					     const struct net_device *dev,
					     struct flowi6 *fl6,
					     int ifindex,
					     int flags)
{
	struct net_vrf *vrf = netdev_priv(dev);
	struct fib6_table *table = NULL;
	struct rt6_info *rt6;

	rcu_read_lock();

	/* fib6_table does not have a refcnt and can not be freed */
	rt6 = rcu_dereference(vrf->rt6);
	if (likely(rt6))
		table = rt6->rt6i_table;

	rcu_read_unlock();

	if (!table)
		return NULL;

	return ip6_pol_route(net, table, ifindex, fl6, flags);
}

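/* for strict (link-local/multicast) packets, set the dst from a lookup
 * in the VRF table while keeping the original ingress ifindex
 */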
static void vrf_ip6_input_dst(struct sk_buff *skb, struct net_device *vrf_dev,
			      int ifindex)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct flowi6 fl6 = {
		.daddr		= iph->daddr,
		.saddr		= iph->saddr,
		.flowlabel	= ip6_flowinfo(iph),
		.flowi6_mark	= skb->mark,
		.flowi6_proto	= iph->nexthdr,
		.flowi6_iif	= ifindex,
	};
	struct net *net = dev_net(vrf_dev);
	struct rt6_info *rt6;

	rt6 = vrf_ip6_route_lookup(net, vrf_dev, &fl6, ifindex,
				   RT6_LOOKUP_F_HAS_SADDR | RT6_LOOKUP_F_IFACE);
	if (unlikely(!rt6))
		return;

	if (unlikely(&rt6->dst == &net->ipv6.ip6_null_entry->dst))
		return;

	skb_dst_set(skb, &rt6->dst);
}

static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	int orig_iif = skb->skb_iif;
	bool need_strict;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;
		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	/* if packet is NDISC or addressed to multicast or link-local
	 * then keep the ingress interface
	 */
	need_strict = rt6_need_strict(&ipv6_hdr(skb)->daddr);
	if (!ipv6_ndisc_frame(skb) && !need_strict) {
		vrf_rx_stats(vrf_dev, skb->len);
		skb->dev = vrf_dev;
		skb->skb_iif = vrf_dev->ifindex;

		if (!list_empty(&vrf_dev->ptype_all)) {
			skb_push(skb, skb->mac_len);
			dev_queue_xmit_nit(skb, vrf_dev);
			skb_pull(skb, skb->mac_len);
		}

		IP6CB(skb)->flags |= IP6SKB_L3SLAVE;
	}

	if (need_strict)
		vrf_ip6_input_dst(skb, vrf_dev, orig_iif);

	skb = vrf_rcv_nfhook(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

#else
static struct sk_buff *vrf_ip6_rcv(struct net_device *vrf_dev,
				   struct sk_buff *skb)
{
	return skb;
}
#endif

static struct sk_buff *vrf_ip_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb)
{
	skb->dev = vrf_dev;
	skb->skb_iif = vrf_dev->ifindex;
	IPCB(skb)->flags |= IPSKB_L3SLAVE;

	if (ipv4_is_multicast(ip_hdr(skb)->daddr))
		goto out;

	/* loopback traffic; do not push through packet taps again.
	 * Reset pkt_type for upper layers to process skb
	 */
	if (skb->pkt_type == PACKET_LOOPBACK) {
		skb->pkt_type = PACKET_HOST;
		goto out;
	}

	vrf_rx_stats(vrf_dev, skb->len);

	if (!list_empty(&vrf_dev->ptype_all)) {
		skb_push(skb, skb->mac_len);
		dev_queue_xmit_nit(skb, vrf_dev);
		skb_pull(skb, skb->mac_len);
	}

	skb = vrf_rcv_nfhook(NFPROTO_IPV4, NF_INET_PRE_ROUTING, skb, vrf_dev);
out:
	return skb;
}

/* called with rcu lock held */
static struct sk_buff *vrf_l3_rcv(struct net_device *vrf_dev,
				  struct sk_buff *skb,
				  u16 proto)
{
	switch (proto) {
	case AF_INET:
		return vrf_ip_rcv(vrf_dev, skb);
	case AF_INET6:
		return vrf_ip6_rcv(vrf_dev, skb);
	}

	return skb;
}

#if IS_ENABLED(CONFIG_IPV6)
/* send to link-local or multicast address via interface enslaved to
 * VRF device. Force lookup to VRF table without changing flow struct
 */
static struct dst_entry *vrf_link_scope_lookup(const struct net_device *dev,
					       struct flowi6 *fl6)
{
	struct net *net = dev_net(dev);
	int flags = RT6_LOOKUP_F_IFACE;
	struct dst_entry *dst = NULL;
	struct rt6_info *rt;

	/* VRF device does not have a link-local address and
	 * sending packets to link-local or mcast addresses over
	 * a VRF device does not make sense
	 */
	if (fl6->flowi6_oif == dev->ifindex) {
		dst = &net->ipv6.ip6_null_entry->dst;
		dst_hold(dst);
		return dst;
	}

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = vrf_ip6_route_lookup(net, dev, fl6, fl6->flowi6_oif, flags);
	if (rt)
		dst = &rt->dst;

	return dst;
}
#endif

static const struct l3mdev_ops vrf_l3mdev_ops = {
	.l3mdev_fib_table	= vrf_fib_table,
	.l3mdev_l3_rcv		= vrf_l3_rcv,
	.l3mdev_l3_out		= vrf_l3_out,
#if IS_ENABLED(CONFIG_IPV6)
	.l3mdev_link_scope_lookup = vrf_link_scope_lookup,
#endif
};

static void vrf_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
}

static const struct ethtool_ops vrf_ethtool_ops = {
	.get_drvinfo	= vrf_get_drvinfo,
};

static inline size_t vrf_fib_rule_nl_size(void)
{
	size_t sz;

	sz = NLMSG_ALIGN(sizeof(struct fib_rule_hdr));
	sz += nla_total_size(sizeof(u8));	/* FRA_L3MDEV */
	sz += nla_total_size(sizeof(u32));	/* FRA_PRIORITY */

	return sz;
}

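/* add or remove the l3mdev FIB rule for the given family by building a
 * netlink message in the kernel and passing it to fib_nl_newrule() /
 * fib_nl_delrule()
 */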
static int vrf_fib_rule(const struct net_device *dev, __u8 family, bool add_it)
{
	struct fib_rule_hdr *frh;
	struct nlmsghdr *nlh;
	struct sk_buff *skb;
	int err;

	if (family == AF_INET6 && !ipv6_mod_enabled())
		return 0;

	skb = nlmsg_new(vrf_fib_rule_nl_size(), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*frh), 0);
	if (!nlh)
		goto nla_put_failure;

	/* rule only needs to appear once */
	nlh->nlmsg_flags |= NLM_F_EXCL;

	frh = nlmsg_data(nlh);
	memset(frh, 0, sizeof(*frh));
	frh->family = family;
	frh->action = FR_ACT_TO_TBL;

	if (nla_put_u32(skb, FRA_L3MDEV, 1))
		goto nla_put_failure;

	if (nla_put_u32(skb, FRA_PRIORITY, FIB_RULE_PREF))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	/* fib_nl_{new,del}rule handling looks for net from skb->sk */
	skb->sk = dev_net(dev)->rtnl;
	if (add_it) {
		err = fib_nl_newrule(skb, nlh, NULL);
		if (err == -EEXIST)
			err = 0;
	} else {
		err = fib_nl_delrule(skb, nlh, NULL);
		if (err == -ENOENT)
			err = 0;
	}
	nlmsg_free(skb);

	return err;

nla_put_failure:
	nlmsg_free(skb);

	return -EMSGSIZE;
}

static int vrf_add_fib_rules(const struct net_device *dev)
{
	int err;

	err = vrf_fib_rule(dev, AF_INET, true);
	if (err < 0)
		goto out_err;

	err = vrf_fib_rule(dev, AF_INET6, true);
	if (err < 0)
		goto ipv6_err;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
	err = vrf_fib_rule(dev, RTNL_FAMILY_IPMR, true);
	if (err < 0)
		goto ipmr_err;
#endif

	return 0;

#if IS_ENABLED(CONFIG_IP_MROUTE_MULTIPLE_TABLES)
ipmr_err:
	vrf_fib_rule(dev, AF_INET6, false);
#endif

ipv6_err:
	vrf_fib_rule(dev, AF_INET, false);

out_err:
	netdev_err(dev, "Failed to add FIB rules.\n");
	return err;
}

static void vrf_setup(struct net_device *dev)
{
	ether_setup(dev);

	/* Initialize the device structure. */
	dev->netdev_ops = &vrf_netdev_ops;
	dev->l3mdev_ops = &vrf_l3mdev_ops;
	dev->ethtool_ops = &vrf_ethtool_ops;
	dev->needs_free_netdev = true;

	/* Fill in device structure with ethernet-generic values. */
	eth_hw_addr_random(dev);

	/* don't acquire vrf device's netif_tx_lock when transmitting */
	dev->features |= NETIF_F_LLTX;

	/* don't allow vrf devices to change network namespaces. */
	dev->features |= NETIF_F_NETNS_LOCAL;

	/* does not make sense for a VLAN to be added to a vrf device */
	dev->features |= NETIF_F_VLAN_CHALLENGED;

	/* enable offload features */
	dev->features |= NETIF_F_GSO_SOFTWARE;
	dev->features |= NETIF_F_RXCSUM | NETIF_F_HW_CSUM;
	dev->features |= NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA;

	dev->hw_features = dev->features;
	dev->hw_enc_features = dev->features;

	/* default to no qdisc; user can add if desired */
	dev->priv_flags |= IFF_NO_QUEUE;
}

static int vrf_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	return 0;
}

static void vrf_dellink(struct net_device *dev, struct list_head *head)
{
	unregister_netdevice_queue(dev, head);
}

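/* newlink handler: a table id is mandatory; the l3mdev FIB rules are
 * installed when the first VRF device is created (add_fib_rules keeps
 * them from being added again)
 */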
static int vrf_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	struct net_vrf *vrf = netdev_priv(dev);
	int err;

	if (!data || !data[IFLA_VRF_TABLE])
		return -EINVAL;

	vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
	if (vrf->tb_id == RT_TABLE_UNSPEC)
		return -EINVAL;

	dev->priv_flags |= IFF_L3MDEV_MASTER;

	err = register_netdevice(dev);
	if (err)
		goto out;

	if (add_fib_rules) {
		err = vrf_add_fib_rules(dev);
		if (err) {
			unregister_netdevice(dev);
			goto out;
		}
		add_fib_rules = false;
	}

out:
	return err;
}

static size_t vrf_nl_getsize(const struct net_device *dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_TABLE */
}

static int vrf_fillinfo(struct sk_buff *skb,
			const struct net_device *dev)
{
	struct net_vrf *vrf = netdev_priv(dev);

	return nla_put_u32(skb, IFLA_VRF_TABLE, vrf->tb_id);
}

static size_t vrf_get_slave_size(const struct net_device *bond_dev,
				 const struct net_device *slave_dev)
{
	return nla_total_size(sizeof(u32));	/* IFLA_VRF_PORT_TABLE */
}

static int vrf_fill_slave_info(struct sk_buff *skb,
			       const struct net_device *vrf_dev,
			       const struct net_device *slave_dev)
{
	struct net_vrf *vrf = netdev_priv(vrf_dev);

	if (nla_put_u32(skb, IFLA_VRF_PORT_TABLE, vrf->tb_id))
		return -EMSGSIZE;

	return 0;
}

static const struct nla_policy vrf_nl_policy[IFLA_VRF_MAX + 1] = {
	[IFLA_VRF_TABLE] = { .type = NLA_U32 },
};

static struct rtnl_link_ops vrf_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct net_vrf),

	.get_size	= vrf_nl_getsize,
	.policy		= vrf_nl_policy,
	.validate	= vrf_validate,
	.fill_info	= vrf_fillinfo,

	.get_slave_size  = vrf_get_slave_size,
	.fill_slave_info = vrf_fill_slave_info,

	.newlink	= vrf_newlink,
	.dellink	= vrf_dellink,
	.setup		= vrf_setup,
	.maxtype	= IFLA_VRF_MAX,
};

static int vrf_device_event(struct notifier_block *unused,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* only care about unregister events to drop slave references */
	if (event == NETDEV_UNREGISTER) {
		struct net_device *vrf_dev;

		if (!netif_is_l3_slave(dev))
			goto out;

		vrf_dev = netdev_master_upper_dev_get(dev);
		vrf_del_slave(vrf_dev, dev);
	}
out:
	return NOTIFY_DONE;
}

static struct notifier_block vrf_notifier_block __read_mostly = {
	.notifier_call = vrf_device_event,
};

static int __init vrf_init_module(void)
{
	int rc;

	register_netdevice_notifier(&vrf_notifier_block);

	rc = rtnl_link_register(&vrf_link_ops);
	if (rc < 0)
		goto error;

	return 0;

error:
	unregister_netdevice_notifier(&vrf_notifier_block);
	return rc;
}

module_init(vrf_init_module);
MODULE_AUTHOR("Shrijeet Mukherjee, David Ahern");
MODULE_DESCRIPTION("Device driver to instantiate VRF domains");
MODULE_LICENSE("GPL");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
MODULE_VERSION(DRV_VERSION);