/*
 * VXLAN: Virtual eXtensible Local Area Network
 *
 * Copyright (c) 2012 Vyatta Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * TODO
 *  - use IANA UDP port number (when defined)
 *  - IPv6 (not in RFC)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rculist.h>
#include <linux/netdevice.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/igmp.h>
#include <linux/etherdevice.h>
#include <linux/if_ether.h>
#include <linux/hash.h>
#include <net/arp.h>
#include <net/ndisc.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/rtnetlink.h>
#include <net/route.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>

#define VXLAN_VERSION	"0.1"

#define VNI_HASH_BITS	10
#define VNI_HASH_SIZE	(1<<VNI_HASH_BITS)
#define FDB_HASH_BITS	8
#define FDB_HASH_SIZE	(1<<FDB_HASH_BITS)
#define FDB_AGE_DEFAULT 300 /* 5 min */
#define FDB_AGE_INTERVAL (10 * HZ)	/* rescan interval */

#define VXLAN_N_VID	(1u << 24)
#define VXLAN_VID_MASK	(VXLAN_N_VID - 1)
/* IP header + UDP + VXLAN + Ethernet header */
#define VXLAN_HEADROOM (20 + 8 + 8 + 14)

#define VXLAN_FLAGS 0x08000000	/* struct vxlanhdr.vx_flags required value. */

/* VXLAN protocol header */
struct vxlanhdr {
	__be32 vx_flags;
	__be32 vx_vni;
};

/* UDP port for VXLAN traffic. */
static unsigned int vxlan_port __read_mostly = 8472;
module_param_named(udp_port, vxlan_port, uint, 0444);
MODULE_PARM_DESC(udp_port, "Destination UDP port");

static bool log_ecn_error = true;
module_param(log_ecn_error, bool, 0644);
MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");

/* per-net private data for this module */
static unsigned int vxlan_net_id;
struct vxlan_net {
	struct socket	  *sock;	/* UDP encap socket */
	struct hlist_head vni_list[VNI_HASH_SIZE];
};

/* Forwarding table entry */
struct vxlan_fdb {
	struct hlist_node hlist;	/* linked list of entries */
	struct rcu_head	  rcu;
	unsigned long	  updated;	/* jiffies */
	unsigned long	  used;
	__be32		  remote_ip;
	u16		  state;	/* see ndm_state */
	u8		  eth_addr[ETH_ALEN];
};

/* Per-cpu network traffic stats */
struct vxlan_stats {
	u64			rx_packets;
	u64			rx_bytes;
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
};

/* Pseudo network device */
struct vxlan_dev {
	struct hlist_node hlist;
	struct net_device *dev;
	struct vxlan_stats __percpu *stats;
	__u32		  vni;		/* virtual network id */
	__be32		  gaddr;	/* multicast group */
	__be32		  saddr;	/* source address */
	unsigned int	  link;		/* link to multicast over */
	__u16		  port_min;	/* source port range */
	__u16		  port_max;
	__u8		  tos;		/* TOS override */
	__u8		  ttl;
	u32		  flags;	/* VXLAN_F_* below */

	unsigned long	  age_interval;
	struct timer_list age_timer;
	spinlock_t	  hash_lock;
	unsigned int	  addrcnt;
	unsigned int	  addrmax;

	struct hlist_head fdb_head[FDB_HASH_SIZE];
};

#define VXLAN_F_LEARN	0x01
#define VXLAN_F_PROXY	0x02
#define VXLAN_F_RSC	0x04
#define VXLAN_F_L2MISS	0x08
#define VXLAN_F_L3MISS	0x10

/* salt for hash table */
static u32 vxlan_salt __read_mostly;

static inline struct hlist_head *vni_head(struct net *net, u32 id)
{
	struct vxlan_net *vn = net_generic(net, vxlan_net_id);

	return &vn->vni_list[hash_32(id, VNI_HASH_BITS)];
}

/* Look up VNI in a per net namespace table */
static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id)
{
	struct vxlan_dev *vxlan;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(vxlan, node, vni_head(net, id), hlist) {
		if (vxlan->vni == id)
			return vxlan;
	}

	return NULL;
}

/* Fill in neighbour message in skbuff. */
static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
			  const struct vxlan_fdb *fdb,
			  u32 portid, u32 seq, int type, unsigned int flags)
{
	unsigned long now = jiffies;
	struct nda_cacheinfo ci;
	struct nlmsghdr *nlh;
	struct ndmsg *ndm;
	bool send_ip, send_eth;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*ndm), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	ndm = nlmsg_data(nlh);
	memset(ndm, 0, sizeof(*ndm));

	send_eth = send_ip = true;

	if (type == RTM_GETNEIGH) {
		ndm->ndm_family	= AF_INET;
		send_ip = fdb->remote_ip != 0;
		send_eth = !is_zero_ether_addr(fdb->eth_addr);
	} else
		ndm->ndm_family	= AF_BRIDGE;
	ndm->ndm_state = fdb->state;
	ndm->ndm_ifindex = vxlan->dev->ifindex;
	ndm->ndm_flags = NTF_SELF;
	ndm->ndm_type = NDA_DST;

	if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
		goto nla_put_failure;

	if (send_ip && nla_put_be32(skb, NDA_DST, fdb->remote_ip))
		goto nla_put_failure;

	ci.ndm_used	 = jiffies_to_clock_t(now - fdb->used);
	ci.ndm_confirmed = 0;
	ci.ndm_updated	 = jiffies_to_clock_t(now - fdb->updated);
	ci.ndm_refcnt	 = 0;

	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

static inline size_t vxlan_nlmsg_size(void)
{
	return NLMSG_ALIGN(sizeof(struct ndmsg))
		+ nla_total_size(ETH_ALEN) /* NDA_LLADDR */
		+ nla_total_size(sizeof(__be32)) /* NDA_DST */
		+ nla_total_size(sizeof(struct nda_cacheinfo));
}

static void vxlan_fdb_notify(struct vxlan_dev *vxlan,
			     const struct vxlan_fdb *fdb, int type)
{
	struct net *net = dev_net(vxlan->dev);
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = nlmsg_new(vxlan_nlmsg_size(), GFP_ATOMIC);
	if (skb == NULL)
		goto errout;

	err = vxlan_fdb_info(skb, vxlan, fdb, 0, 0, type, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in vxlan_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}

	rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
}

static void vxlan_ip_miss(struct net_device *dev, __be32 ipa)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	f.remote_ip = ipa; /* goes to NDA_DST */

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

static void vxlan_fdb_miss(struct vxlan_dev *vxlan, const u8 eth_addr[ETH_ALEN])
{
	struct vxlan_fdb f;

	memset(&f, 0, sizeof f);
	f.state = NUD_STALE;
	memcpy(f.eth_addr, eth_addr, ETH_ALEN);

	vxlan_fdb_notify(vxlan, &f, RTM_GETNEIGH);
}

/* Hash Ethernet address */
static u32 eth_hash(const unsigned char *addr)
{
	u64 value = get_unaligned((u64 *)addr);

	/* only want 6 bytes */
#ifdef __BIG_ENDIAN
	value >>= 16;
#else
	value <<= 16;
#endif
	return hash_64(value, FDB_HASH_BITS);
}

/* Hash chain to use given mac address */
static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
						const u8 *mac)
{
	return &vxlan->fdb_head[eth_hash(mac)];
}

/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *vxlan_find_mac(struct vxlan_dev *vxlan,
					const u8 *mac)

{
	struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
	struct vxlan_fdb *f;
	struct hlist_node *node;

	hlist_for_each_entry_rcu(f, node, head, hlist) {
		if (compare_ether_addr(mac, f->eth_addr) == 0)
			return f;
	}

	return NULL;
}

/* Add new entry to forwarding table -- assumes lock held */
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
			    const u8 *mac, __be32 ip,
			    __u16 state, __u16 flags)
{
	struct vxlan_fdb *f;
	int notify = 0;

	f = vxlan_find_mac(vxlan, mac);
	if (f) {
		if (flags & NLM_F_EXCL) {
			netdev_dbg(vxlan->dev,
				   "lost race to create %pM\n", mac);
			return -EEXIST;
		}
		if (f->state != state) {
			f->state = state;
			f->updated = jiffies;
			notify = 1;
		}
	} else {
		if (!(flags & NLM_F_CREATE))
			return -ENOENT;

		if (vxlan->addrmax && vxlan->addrcnt >= vxlan->addrmax)
			return -ENOSPC;

		netdev_dbg(vxlan->dev, "add %pM -> %pI4\n", mac, &ip);
		f = kmalloc(sizeof(*f), GFP_ATOMIC);
		if (!f)
			return -ENOMEM;

		notify = 1;
		f->remote_ip = ip;
		f->state = state;
		f->updated = f->used = jiffies;
		memcpy(f->eth_addr, mac, ETH_ALEN);

		++vxlan->addrcnt;
		hlist_add_head_rcu(&f->hlist,
				   vxlan_fdb_head(vxlan, mac));
	}

	if (notify)
		vxlan_fdb_notify(vxlan, f, RTM_NEWNEIGH);

	return 0;
}

static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
{
	netdev_dbg(vxlan->dev,
		   "delete %pM\n", f->eth_addr);

	--vxlan->addrcnt;
	vxlan_fdb_notify(vxlan, f, RTM_DELNEIGH);

	hlist_del_rcu(&f->hlist);
	kfree_rcu(f, rcu);
}

/* Add static entry (via netlink) */
static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			 struct net_device *dev,
			 const unsigned char *addr, u16 flags)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__be32 ip;
	int err;

	if (!(ndm->ndm_state & (NUD_PERMANENT|NUD_REACHABLE))) {
		pr_info("RTM_NEWNEIGH with invalid state %#x\n",
			ndm->ndm_state);
		return -EINVAL;
	}

	if (tb[NDA_DST] == NULL)
		return -EINVAL;

	if (nla_len(tb[NDA_DST]) != sizeof(__be32))
		return -EAFNOSUPPORT;

	ip = nla_get_be32(tb[NDA_DST]);

	spin_lock_bh(&vxlan->hash_lock);
	err = vxlan_fdb_create(vxlan, addr, ip, ndm->ndm_state, flags);
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Delete entry (via netlink) */
static int vxlan_fdb_delete(struct ndmsg *ndm, struct net_device *dev,
			    const unsigned char *addr)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err = -ENOENT;

	spin_lock_bh(&vxlan->hash_lock);
	f = vxlan_find_mac(vxlan, addr);
	if (f) {
		vxlan_fdb_destroy(vxlan, f);
		err = 0;
	}
	spin_unlock_bh(&vxlan->hash_lock);

	return err;
}

/* Dump forwarding table */
static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
			  struct net_device *dev, int idx)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned int h;

	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct vxlan_fdb *f;
		struct hlist_node *n;
		int err;

		hlist_for_each_entry_rcu(f, n, &vxlan->fdb_head[h], hlist) {
			if (idx < cb->args[0])
				goto skip;

			err = vxlan_fdb_info(skb, vxlan, f,
					     NETLINK_CB(cb->skb).portid,
					     cb->nlh->nlmsg_seq,
					     RTM_NEWNEIGH,
					     NLM_F_MULTI);
			if (err < 0)
				break;
skip:
			++idx;
		}
	}

	return idx;
}

/* Watch incoming packets to learn mapping between Ethernet address
 * and Tunnel endpoint.
 */
static void vxlan_snoop(struct net_device *dev,
			__be32 src_ip, const u8 *src_mac)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_fdb *f;
	int err;

	f = vxlan_find_mac(vxlan, src_mac);
	if (likely(f)) {
		f->used = jiffies;
		if (likely(f->remote_ip == src_ip))
			return;

		if (net_ratelimit())
			netdev_info(dev,
				    "%pM migrated from %pI4 to %pI4\n",
				    src_mac, &f->remote_ip, &src_ip);

		f->remote_ip = src_ip;
		f->updated = jiffies;
	} else {
		/* learned new entry */
		spin_lock(&vxlan->hash_lock);
		err = vxlan_fdb_create(vxlan, src_mac, src_ip,
				       NUD_REACHABLE,
				       NLM_F_EXCL|NLM_F_CREATE);
		spin_unlock(&vxlan->hash_lock);
	}
}


/* See if multicast group is already in use by other ID */
static bool vxlan_group_used(struct vxlan_net *vn,
			     const struct vxlan_dev *this)
{
	const struct vxlan_dev *vxlan;
	struct hlist_node *node;
	unsigned h;

	for (h = 0; h < VNI_HASH_SIZE; ++h)
		hlist_for_each_entry(vxlan, node, &vn->vni_list[h], hlist) {
			if (vxlan == this)
				continue;

			if (!netif_running(vxlan->dev))
				continue;

			if (vxlan->gaddr == this->gaddr)
				return true;
		}

	return false;
}

/* kernel equivalent to IP_ADD_MEMBERSHIP */
static int vxlan_join_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};
	int err;

	/* Already a member of group */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast join */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_join_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}


/* kernel equivalent to IP_DROP_MEMBERSHIP */
static int vxlan_leave_group(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	int err = 0;
	struct sock *sk = vn->sock->sk;
	struct ip_mreqn mreq = {
		.imr_multiaddr.s_addr = vxlan->gaddr,
	};

	/* Only leave group when last vxlan is done. */
	if (vxlan_group_used(vn, vxlan))
		return 0;

	/* Need to drop RTNL to call multicast leave */
	rtnl_unlock();
	lock_sock(sk);
	err = ip_mc_leave_group(sk, &mreq);
	release_sock(sk);
	rtnl_lock();

	return err;
}

/* Callback from net/ipv4/udp.c to receive packets */
static int vxlan_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *oip;
	struct vxlanhdr *vxh;
	struct vxlan_dev *vxlan;
	struct vxlan_stats *stats;
	__u32 vni;
	int err;

	/* pop off outer UDP header */
	__skb_pull(skb, sizeof(struct udphdr));

	/* Need Vxlan and inner Ethernet header to be present */
	if (!pskb_may_pull(skb, sizeof(struct vxlanhdr)))
		goto error;

	/* Drop packets with reserved bits set */
	vxh = (struct vxlanhdr *) skb->data;
	if (vxh->vx_flags != htonl(VXLAN_FLAGS) ||
	    (vxh->vx_vni & htonl(0xff))) {
		netdev_dbg(skb->dev, "invalid vxlan flags=%#x vni=%#x\n",
			   ntohl(vxh->vx_flags), ntohl(vxh->vx_vni));
		goto error;
	}

	__skb_pull(skb, sizeof(struct vxlanhdr));

	/* Is this VNI defined? */
	vni = ntohl(vxh->vx_vni) >> 8;
	vxlan = vxlan_find_vni(sock_net(sk), vni);
	if (!vxlan) {
		netdev_dbg(skb->dev, "unknown vni %d\n", vni);
		goto drop;
	}

	if (!pskb_may_pull(skb, ETH_HLEN)) {
		vxlan->dev->stats.rx_length_errors++;
		vxlan->dev->stats.rx_errors++;
		goto drop;
	}

	skb_reset_mac_header(skb);

	/* Re-examine inner Ethernet packet */
	oip = ip_hdr(skb);
	skb->protocol = eth_type_trans(skb, vxlan->dev);

	/* Ignore packet loops (and multicast echo) */
	if (compare_ether_addr(eth_hdr(skb)->h_source,
			       vxlan->dev->dev_addr) == 0)
		goto drop;

	if (vxlan->flags & VXLAN_F_LEARN)
		vxlan_snoop(skb->dev, oip->saddr, eth_hdr(skb)->h_source);

	__skb_tunnel_rx(skb, vxlan->dev);
	skb_reset_network_header(skb);
	skb->ip_summed = CHECKSUM_NONE;

	err = IP_ECN_decapsulate(oip, skb);
	if (unlikely(err)) {
		if (log_ecn_error)
			net_info_ratelimited("non-ECT from %pI4 with TOS=%#x\n",
					     &oip->saddr, oip->tos);
		if (err > 1) {
			++vxlan->dev->stats.rx_frame_errors;
			++vxlan->dev->stats.rx_errors;
			goto drop;
		}
	}

	stats = this_cpu_ptr(vxlan->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += skb->len;
	u64_stats_update_end(&stats->syncp);

	netif_rx(skb);

	return 0;
error:
	/* Put UDP header back */
	__skb_push(skb, sizeof(struct udphdr));

	return 1;
drop:
	/* Consume bad packet */
	kfree_skb(skb);
	return 0;
}

static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct arphdr *parp;
	u8 *arpptr, *sha;
	__be32 sip, tip;
	struct neighbour *n;

	if (dev->flags & IFF_NOARP)
		goto out;

	if (!pskb_may_pull(skb, arp_hdr_len(dev))) {
		dev->stats.tx_dropped++;
		goto out;
	}
	parp = arp_hdr(skb);

	if ((parp->ar_hrd != htons(ARPHRD_ETHER) &&
	     parp->ar_hrd != htons(ARPHRD_IEEE802)) ||
	    parp->ar_pro != htons(ETH_P_IP) ||
	    parp->ar_op != htons(ARPOP_REQUEST) ||
	    parp->ar_hln != dev->addr_len ||
	    parp->ar_pln != 4)
		goto out;
	arpptr = (u8 *)parp + sizeof(struct arphdr);
	sha = arpptr;
	arpptr += dev->addr_len;	/* sha */
	memcpy(&sip, arpptr, sizeof(sip));
	arpptr += sizeof(sip);
	arpptr += dev->addr_len;	/* tha */
	memcpy(&tip, arpptr, sizeof(tip));

	if (ipv4_is_loopback(tip) ||
	    ipv4_is_multicast(tip))
		goto out;

	n = neigh_lookup(&arp_tbl, &tip, dev);

	if (n) {
		struct vxlan_dev *vxlan = netdev_priv(dev);
		struct vxlan_fdb *f;
		struct sk_buff	*reply;

		if (!(n->nud_state & NUD_CONNECTED)) {
			neigh_release(n);
			goto out;
		}

		f = vxlan_find_mac(vxlan, n->ha);
		if (f && f->remote_ip == 0) {
			/* bridge-local neighbor */
			neigh_release(n);
			goto out;
		}

		reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
				   n->ha, sha);

		neigh_release(n);

		skb_reset_mac_header(reply);
		__skb_pull(reply, skb_network_offset(reply));
		reply->ip_summed = CHECKSUM_UNNECESSARY;
		reply->pkt_type = PACKET_HOST;

		if (netif_rx_ni(reply) == NET_RX_DROP)
			dev->stats.rx_dropped++;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, tip);
out:
	consume_skb(skb);
	return NETDEV_TX_OK;
}

static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct neighbour *n;
	struct iphdr *pip;

	if (is_multicast_ether_addr(eth_hdr(skb)->h_dest))
		return false;

	n = NULL;
	switch (ntohs(eth_hdr(skb)->h_proto)) {
	case ETH_P_IP:
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return false;
		pip = ip_hdr(skb);
		n = neigh_lookup(&arp_tbl, &pip->daddr, dev);
		break;
	default:
		return false;
	}

	if (n) {
		bool diff;

		diff = compare_ether_addr(eth_hdr(skb)->h_dest, n->ha) != 0;
		if (diff) {
			memcpy(eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
			       dev->addr_len);
			memcpy(eth_hdr(skb)->h_dest, n->ha, dev->addr_len);
		}
		neigh_release(n);
		return diff;
	} else if (vxlan->flags & VXLAN_F_L3MISS)
		vxlan_ip_miss(dev, pip->daddr);
	return false;
}

/* Extract dsfield from inner protocol */
static inline u8 vxlan_get_dsfield(const struct iphdr *iph,
				   const struct sk_buff *skb)
{
	if (skb->protocol == htons(ETH_P_IP))
		return iph->tos;
	else if (skb->protocol == htons(ETH_P_IPV6))
		return ipv6_get_dsfield((const struct ipv6hdr *)iph);
	else
		return 0;
}

/* Propagate ECN bits out */
static inline u8 vxlan_ecn_encap(u8 tos,
				 const struct iphdr *iph,
				 const struct sk_buff *skb)
{
	u8 inner = vxlan_get_dsfield(iph, skb);

	return INET_ECN_encapsulate(tos, inner);
}

static void vxlan_sock_free(struct sk_buff *skb)
{
	sock_put(skb->sk);
}

/* On transmit, associate with the tunnel socket */
static void vxlan_set_owner(struct net_device *dev, struct sk_buff *skb)
{
	struct vxlan_net *vn = net_generic(dev_net(dev), vxlan_net_id);
	struct sock *sk = vn->sock->sk;

	skb_orphan(skb);
	sock_hold(sk);
	skb->sk = sk;
	skb->destructor = vxlan_sock_free;
}

/* Compute source port for outgoing packet
 *   first choice to use L4 flow hash since it will spread
 *     better and maybe available from hardware
 *   secondary choice is to use jhash on the Ethernet header
 */
static u16 vxlan_src_port(const struct vxlan_dev *vxlan, struct sk_buff *skb)
{
	unsigned int range = (vxlan->port_max - vxlan->port_min) + 1;
	u32 hash;

	hash = skb_get_rxhash(skb);
	if (!hash)
		hash = jhash(skb->data, 2 * ETH_ALEN,
			     (__force u32) skb->protocol);

	return (((u64) hash * range) >> 32) + vxlan->port_min;
}

/* Transmit local packets over Vxlan
 *
 * Outer IP header inherits ECN and DF from inner header.
 * Outer UDP destination is the VXLAN assigned port.
 *   source port is based on hash of flow
 */
static netdev_tx_t vxlan_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct rtable *rt;
	const struct iphdr *old_iph;
	struct ethhdr *eth;
	struct iphdr *iph;
	struct vxlanhdr *vxh;
	struct udphdr *uh;
	struct flowi4 fl4;
	unsigned int pkt_len = skb->len;
	__be32 dst;
	__u16 src_port;
	__be16 df = 0;
	__u8 tos, ttl;
	int err;
	bool did_rsc = false;
	const struct vxlan_fdb *f;

	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);

	if ((vxlan->flags & VXLAN_F_PROXY) && ntohs(eth->h_proto) == ETH_P_ARP)
		return arp_reduce(dev, skb);
	else if ((vxlan->flags&VXLAN_F_RSC) && ntohs(eth->h_proto) == ETH_P_IP)
		did_rsc = route_shortcircuit(dev, skb);

	f = vxlan_find_mac(vxlan, eth->h_dest);
	if (f == NULL) {
		did_rsc = false;
		dst = vxlan->gaddr;
		if (!dst && (vxlan->flags & VXLAN_F_L2MISS) &&
		    !is_multicast_ether_addr(eth->h_dest))
			vxlan_fdb_miss(vxlan, eth->h_dest);
	} else
		dst = f->remote_ip;

	if (!dst) {
		if (did_rsc) {
			__skb_pull(skb, skb_network_offset(skb));
			skb->ip_summed = CHECKSUM_NONE;
			skb->pkt_type = PACKET_HOST;

			/* short-circuited back to local bridge */
			if (netif_rx(skb) == NET_RX_SUCCESS) {
				struct vxlan_stats *stats =
						this_cpu_ptr(vxlan->stats);

				u64_stats_update_begin(&stats->syncp);
				stats->tx_packets++;
				stats->tx_bytes += pkt_len;
				u64_stats_update_end(&stats->syncp);
			} else {
				dev->stats.tx_errors++;
				dev->stats.tx_aborted_errors++;
			}
			return NETDEV_TX_OK;
		}
		goto drop;
	}

	/* Need space for new headers (invalidates iph ptr) */
	if (skb_cow_head(skb, VXLAN_HEADROOM))
		goto drop;

	old_iph = ip_hdr(skb);

	ttl = vxlan->ttl;
	if (!ttl && IN_MULTICAST(ntohl(dst)))
		ttl = 1;

	tos = vxlan->tos;
	if (tos == 1)
		tos = vxlan_get_dsfield(old_iph, skb);

	src_port = vxlan_src_port(vxlan, skb);

	memset(&fl4, 0, sizeof(fl4));
	fl4.flowi4_oif = vxlan->link;
	fl4.flowi4_tos = RT_TOS(tos);
	fl4.daddr = dst;
	fl4.saddr = vxlan->saddr;

	rt = ip_route_output_key(dev_net(dev), &fl4);
	if (IS_ERR(rt)) {
		netdev_dbg(dev, "no route to %pI4\n", &dst);
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	if (rt->dst.dev == dev) {
		netdev_dbg(dev, "circular route to %pI4\n", &dst);
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
	vxh->vx_flags = htonl(VXLAN_FLAGS);
	vxh->vx_vni = htonl(vxlan->vni << 8);

	__skb_push(skb, sizeof(*uh));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);

	uh->dest = htons(vxlan_port);
	uh->source = htons(src_port);

	uh->len = htons(skb->len);
	uh->check = 0;

	__skb_push(skb, sizeof(*iph));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version	= 4;
	iph->ihl	= sizeof(struct iphdr) >> 2;
	iph->frag_off	= df;
	iph->protocol	= IPPROTO_UDP;
	iph->tos	= vxlan_ecn_encap(tos, old_iph, skb);
	iph->daddr	= dst;
	iph->saddr	= fl4.saddr;
	iph->ttl	= ttl ? : ip4_dst_hoplimit(&rt->dst);

	vxlan_set_owner(dev, skb);

	/* See iptunnel_xmit() */
	skb->ip_summed = CHECKSUM_NONE;
	ip_select_ident(iph, &rt->dst, NULL);

	err = ip_local_out(skb);
	if (likely(net_xmit_eval(err) == 0)) {
		struct vxlan_stats *stats = this_cpu_ptr(vxlan->stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += pkt_len;
		u64_stats_update_end(&stats->syncp);
	} else {
		dev->stats.tx_errors++;
		dev->stats.tx_aborted_errors++;
	}
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	goto tx_free;

tx_error:
	dev->stats.tx_errors++;
tx_free:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

/* Walk the forwarding table and purge stale entries */
static void vxlan_cleanup(unsigned long arg)
{
	struct vxlan_dev *vxlan = (struct vxlan_dev *) arg;
	unsigned long next_timer = jiffies + FDB_AGE_INTERVAL;
	unsigned int h;

	if (!netif_running(vxlan->dev))
		return;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			unsigned long timeout;

			if (f->state & NUD_PERMANENT)
				continue;

			timeout = f->used + vxlan->age_interval * HZ;
			if (time_before_eq(timeout, jiffies)) {
				netdev_dbg(vxlan->dev,
					   "garbage collect %pM\n",
					   f->eth_addr);
				f->state = NUD_STALE;
				vxlan_fdb_destroy(vxlan, f);
			} else if (time_before(timeout, next_timer))
				next_timer = timeout;
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);

	mod_timer(&vxlan->age_timer, next_timer);
}

/* Setup stats when device is created */
static int vxlan_init(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	vxlan->stats = alloc_percpu(struct vxlan_stats);
	if (!vxlan->stats)
		return -ENOMEM;

	return 0;
}

/* Start ageing timer and join group when device is brought up */
static int vxlan_open(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	int err;

	if (vxlan->gaddr) {
		err = vxlan_join_group(dev);
		if (err)
			return err;
	}

	if (vxlan->age_interval)
		mod_timer(&vxlan->age_timer, jiffies + FDB_AGE_INTERVAL);

	return 0;
}

/* Purge the forwarding table */
static void vxlan_flush(struct vxlan_dev *vxlan)
{
	unsigned h;

	spin_lock_bh(&vxlan->hash_lock);
	for (h = 0; h < FDB_HASH_SIZE; ++h) {
		struct hlist_node *p, *n;
		hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
			struct vxlan_fdb *f
				= container_of(p, struct vxlan_fdb, hlist);
			vxlan_fdb_destroy(vxlan, f);
		}
	}
	spin_unlock_bh(&vxlan->hash_lock);
}

/* Cleanup timer and forwarding table on shutdown */
static int vxlan_stop(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	if (vxlan->gaddr)
		vxlan_leave_group(dev);

	del_timer_sync(&vxlan->age_timer);

	vxlan_flush(vxlan);

	return 0;
}

/* Merge per-cpu statistics */
static struct rtnl_link_stats64 *vxlan_stats64(struct net_device *dev,
					       struct rtnl_link_stats64 *stats)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	struct vxlan_stats tmp, sum = { 0 };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		unsigned int start;
		const struct vxlan_stats *stats
			= per_cpu_ptr(vxlan->stats, cpu);

		do {
			start = u64_stats_fetch_begin_bh(&stats->syncp);
			memcpy(&tmp, stats, sizeof(tmp));
		} while (u64_stats_fetch_retry_bh(&stats->syncp, start));

		sum.tx_bytes   += tmp.tx_bytes;
		sum.tx_packets += tmp.tx_packets;
		sum.rx_bytes   += tmp.rx_bytes;
		sum.rx_packets += tmp.rx_packets;
	}

	stats->tx_bytes   = sum.tx_bytes;
	stats->tx_packets = sum.tx_packets;
	stats->rx_bytes   = sum.rx_bytes;
	stats->rx_packets = sum.rx_packets;

	stats->multicast = dev->stats.multicast;
	stats->rx_length_errors = dev->stats.rx_length_errors;
	stats->rx_frame_errors = dev->stats.rx_frame_errors;
	stats->rx_errors = dev->stats.rx_errors;

	stats->tx_dropped = dev->stats.tx_dropped;
	stats->tx_carrier_errors = dev->stats.tx_carrier_errors;
	stats->tx_aborted_errors = dev->stats.tx_aborted_errors;
	stats->collisions = dev->stats.collisions;
	stats->tx_errors = dev->stats.tx_errors;

	return stats;
}

/* Stub, nothing needs to be done. */
static void vxlan_set_multicast_list(struct net_device *dev)
{
}

static const struct net_device_ops vxlan_netdev_ops = {
	.ndo_init		= vxlan_init,
	.ndo_open		= vxlan_open,
	.ndo_stop		= vxlan_stop,
	.ndo_start_xmit		= vxlan_xmit,
	.ndo_get_stats64	= vxlan_stats64,
	.ndo_set_rx_mode	= vxlan_set_multicast_list,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_fdb_add		= vxlan_fdb_add,
	.ndo_fdb_del		= vxlan_fdb_delete,
	.ndo_fdb_dump		= vxlan_fdb_dump,
};

/* Info for udev, that this is a virtual tunnel endpoint */
static struct device_type vxlan_type = {
	.name = "vxlan",
};

static void vxlan_free(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	free_percpu(vxlan->stats);
	free_netdev(dev);
}

/* Initialize the device structure. */
static void vxlan_setup(struct net_device *dev)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	unsigned h;
	int low, high;

	eth_hw_addr_random(dev);
	ether_setup(dev);
	dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;

	dev->netdev_ops = &vxlan_netdev_ops;
	dev->destructor = vxlan_free;
	SET_NETDEV_DEVTYPE(dev, &vxlan_type);

	dev->tx_queue_len = 0;
	dev->features	|= NETIF_F_LLTX;
	dev->features	|= NETIF_F_NETNS_LOCAL;
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;

	spin_lock_init(&vxlan->hash_lock);

	init_timer_deferrable(&vxlan->age_timer);
	vxlan->age_timer.function = vxlan_cleanup;
	vxlan->age_timer.data = (unsigned long) vxlan;

	inet_get_local_port_range(&low, &high);
	vxlan->port_min = low;
	vxlan->port_max = high;

	vxlan->dev = dev;

	for (h = 0; h < FDB_HASH_SIZE; ++h)
		INIT_HLIST_HEAD(&vxlan->fdb_head[h]);
}

static const struct nla_policy vxlan_policy[IFLA_VXLAN_MAX + 1] = {
	[IFLA_VXLAN_ID]		= { .type = NLA_U32 },
	[IFLA_VXLAN_GROUP]	= { .len = FIELD_SIZEOF(struct iphdr, daddr) },
	[IFLA_VXLAN_LINK]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LOCAL]	= { .len = FIELD_SIZEOF(struct iphdr, saddr) },
	[IFLA_VXLAN_TOS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_TTL]	= { .type = NLA_U8 },
	[IFLA_VXLAN_LEARNING]	= { .type = NLA_U8 },
	[IFLA_VXLAN_AGEING]	= { .type = NLA_U32 },
	[IFLA_VXLAN_LIMIT]	= { .type = NLA_U32 },
	[IFLA_VXLAN_PORT_RANGE] = { .len = sizeof(struct ifla_vxlan_port_range) },
	[IFLA_VXLAN_PROXY]	= { .type = NLA_U8 },
	[IFLA_VXLAN_RSC]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L2MISS]	= { .type = NLA_U8 },
	[IFLA_VXLAN_L3MISS]	= { .type = NLA_U8 },
};

static int vxlan_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) {
			pr_debug("invalid link address (not ethernet)\n");
			return -EINVAL;
		}

		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) {
			pr_debug("invalid all zero ethernet address\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (!data)
		return -EINVAL;

	if (data[IFLA_VXLAN_ID]) {
		__u32 id = nla_get_u32(data[IFLA_VXLAN_ID]);
		if (id >= VXLAN_VID_MASK)
			return -ERANGE;
	}

	if (data[IFLA_VXLAN_GROUP]) {
		__be32 gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);
		if (!IN_MULTICAST(ntohl(gaddr))) {
			pr_debug("group address is not IPv4 multicast\n");
			return -EADDRNOTAVAIL;
		}
	}

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);

		if (ntohs(p->high) < ntohs(p->low)) {
			pr_debug("port range %u .. %u not valid\n",
				 ntohs(p->low), ntohs(p->high));
			return -EINVAL;
		}
	}

	return 0;
}

static int vxlan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	struct vxlan_dev *vxlan = netdev_priv(dev);
	__u32 vni;
	int err;

	if (!data[IFLA_VXLAN_ID])
		return -EINVAL;

	vni = nla_get_u32(data[IFLA_VXLAN_ID]);
	if (vxlan_find_vni(net, vni)) {
		pr_info("duplicate VNI %u\n", vni);
		return -EEXIST;
	}
	vxlan->vni = vni;

	if (data[IFLA_VXLAN_GROUP])
		vxlan->gaddr = nla_get_be32(data[IFLA_VXLAN_GROUP]);

	if (data[IFLA_VXLAN_LOCAL])
		vxlan->saddr = nla_get_be32(data[IFLA_VXLAN_LOCAL]);

	if (data[IFLA_VXLAN_LINK] &&
	    (vxlan->link = nla_get_u32(data[IFLA_VXLAN_LINK]))) {
		struct net_device *lowerdev
			 = __dev_get_by_index(net, vxlan->link);

		if (!lowerdev) {
			pr_info("ifindex %d does not exist\n", vxlan->link);
			return -ENODEV;
		}

		if (!tb[IFLA_MTU])
			dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;

		/* update header length based on lower device */
		dev->hard_header_len = lowerdev->hard_header_len +
				       VXLAN_HEADROOM;
	}

	if (data[IFLA_VXLAN_TOS])
		vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);

	if (data[IFLA_VXLAN_TTL])
		vxlan->ttl = nla_get_u8(data[IFLA_VXLAN_TTL]);

	if (!data[IFLA_VXLAN_LEARNING] || nla_get_u8(data[IFLA_VXLAN_LEARNING]))
		vxlan->flags |= VXLAN_F_LEARN;

	if (data[IFLA_VXLAN_AGEING])
		vxlan->age_interval = nla_get_u32(data[IFLA_VXLAN_AGEING]);
	else
		vxlan->age_interval = FDB_AGE_DEFAULT;

	if (data[IFLA_VXLAN_PROXY] && nla_get_u8(data[IFLA_VXLAN_PROXY]))
		vxlan->flags |= VXLAN_F_PROXY;

	if (data[IFLA_VXLAN_RSC] && nla_get_u8(data[IFLA_VXLAN_RSC]))
		vxlan->flags |= VXLAN_F_RSC;

	if (data[IFLA_VXLAN_L2MISS] && nla_get_u8(data[IFLA_VXLAN_L2MISS]))
		vxlan->flags |= VXLAN_F_L2MISS;

	if (data[IFLA_VXLAN_L3MISS] && nla_get_u8(data[IFLA_VXLAN_L3MISS]))
		vxlan->flags |= VXLAN_F_L3MISS;

	if (data[IFLA_VXLAN_LIMIT])
		vxlan->addrmax = nla_get_u32(data[IFLA_VXLAN_LIMIT]);

	if (data[IFLA_VXLAN_PORT_RANGE]) {
		const struct ifla_vxlan_port_range *p
			= nla_data(data[IFLA_VXLAN_PORT_RANGE]);
		vxlan->port_min = ntohs(p->low);
		vxlan->port_max = ntohs(p->high);
	}

	err = register_netdevice(dev);
	if (!err)
		hlist_add_head_rcu(&vxlan->hlist, vni_head(net, vxlan->vni));

	return err;
}

static void vxlan_dellink(struct net_device *dev, struct list_head *head)
{
	struct vxlan_dev *vxlan = netdev_priv(dev);

	hlist_del_rcu(&vxlan->hlist);

	unregister_netdevice_queue(dev, head);
}

static size_t vxlan_get_size(const struct net_device *dev)
{

	return nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_ID */
		nla_total_size(sizeof(__be32)) +/* IFLA_VXLAN_GROUP */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LINK */
		nla_total_size(sizeof(__be32))+	/* IFLA_VXLAN_LOCAL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TTL */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_TOS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_LEARNING */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_PROXY */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_RSC */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L2MISS */
		nla_total_size(sizeof(__u8)) +	/* IFLA_VXLAN_L3MISS */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_AGEING */
		nla_total_size(sizeof(__u32)) +	/* IFLA_VXLAN_LIMIT */
		nla_total_size(sizeof(struct ifla_vxlan_port_range)) +
		0;
}

static int vxlan_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	const struct vxlan_dev *vxlan = netdev_priv(dev);
	struct ifla_vxlan_port_range ports = {
		.low  = htons(vxlan->port_min),
		.high = htons(vxlan->port_max),
	};

	if (nla_put_u32(skb, IFLA_VXLAN_ID, vxlan->vni))
		goto nla_put_failure;

	if (vxlan->gaddr && nla_put_be32(skb, IFLA_VXLAN_GROUP, vxlan->gaddr))
		goto nla_put_failure;

	if (vxlan->link && nla_put_u32(skb, IFLA_VXLAN_LINK, vxlan->link))
		goto nla_put_failure;

	if (vxlan->saddr && nla_put_be32(skb, IFLA_VXLAN_LOCAL, vxlan->saddr))
		goto nla_put_failure;

	if (nla_put_u8(skb, IFLA_VXLAN_TTL, vxlan->ttl) ||
	    nla_put_u8(skb, IFLA_VXLAN_TOS, vxlan->tos) ||
	    nla_put_u8(skb, IFLA_VXLAN_LEARNING,
			!!(vxlan->flags & VXLAN_F_LEARN)) ||
	    nla_put_u8(skb, IFLA_VXLAN_PROXY,
			!!(vxlan->flags & VXLAN_F_PROXY)) ||
	    nla_put_u8(skb, IFLA_VXLAN_RSC, !!(vxlan->flags & VXLAN_F_RSC)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L2MISS,
			!!(vxlan->flags & VXLAN_F_L2MISS)) ||
	    nla_put_u8(skb, IFLA_VXLAN_L3MISS,
			!!(vxlan->flags & VXLAN_F_L3MISS)) ||
	    nla_put_u32(skb, IFLA_VXLAN_AGEING, vxlan->age_interval) ||
	    nla_put_u32(skb, IFLA_VXLAN_LIMIT, vxlan->addrmax))
		goto nla_put_failure;

	if (nla_put(skb, IFLA_VXLAN_PORT_RANGE, sizeof(ports), &ports))
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
	.kind		= "vxlan",
	.maxtype	= IFLA_VXLAN_MAX,
	.policy		= vxlan_policy,
	.priv_size	= sizeof(struct vxlan_dev),
	.setup		= vxlan_setup,
	.validate	= vxlan_validate,
	.newlink	= vxlan_newlink,
	.dellink	= vxlan_dellink,
	.get_size	= vxlan_get_size,
	.fill_info	= vxlan_fill_info,
};

1421static __net_init int vxlan_init_net(struct net *net)
1422{
1423 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1424 struct sock *sk;
1425 struct sockaddr_in vxlan_addr = {
1426 .sin_family = AF_INET,
1427 .sin_addr.s_addr = htonl(INADDR_ANY),
1428 };
1429 int rc;
1430 unsigned h;
1431
1432 /* Create UDP socket for encapsulation receive. */
1433 rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &vn->sock);
1434 if (rc < 0) {
1435 pr_debug("UDP socket create failed\n");
1436 return rc;
1437 }
stephen hemmingerbfe1b9b2012-10-01 18:49:21 +00001438 /* Put in proper namespace */
1439 sk = vn->sock->sk;
1440 sk_change_net(sk, net);
stephen hemmingerd3428942012-10-01 12:32:35 +00001441
1442 vxlan_addr.sin_port = htons(vxlan_port);
1443
1444 rc = kernel_bind(vn->sock, (struct sockaddr *) &vxlan_addr,
1445 sizeof(vxlan_addr));
1446 if (rc < 0) {
1447 pr_debug("bind for UDP socket %pI4:%u (%d)\n",
1448 &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
stephen hemmingerbfe1b9b2012-10-01 18:49:21 +00001449 sk_release_kernel(sk);
stephen hemmingerd3428942012-10-01 12:32:35 +00001450 vn->sock = NULL;
1451 return rc;
1452 }
1453
1454 /* Disable multicast loopback */
stephen hemmingerd3428942012-10-01 12:32:35 +00001455 inet_sk(sk)->mc_loop = 0;
1456
1457 /* Mark socket as an encapsulation socket. */
1458 udp_sk(sk)->encap_type = 1;
1459 udp_sk(sk)->encap_rcv = vxlan_udp_encap_recv;
1460 udp_encap_enable();
1461
1462 for (h = 0; h < VNI_HASH_SIZE; ++h)
1463 INIT_HLIST_HEAD(&vn->vni_list[h]);
1464
1465 return 0;
1466}
1467
1468static __net_exit void vxlan_exit_net(struct net *net)
1469{
1470 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
1471
1472 if (vn->sock) {
stephen hemmingerbfe1b9b2012-10-01 18:49:21 +00001473 sk_release_kernel(vn->sock->sk);
stephen hemmingerd3428942012-10-01 12:32:35 +00001474 vn->sock = NULL;
1475 }
1476}
1477
1478static struct pernet_operations vxlan_net_ops = {
1479 .init = vxlan_init_net,
1480 .exit = vxlan_exit_net,
1481 .id = &vxlan_net_id,
1482 .size = sizeof(struct vxlan_net),
1483};
1484
1485static int __init vxlan_init_module(void)
1486{
1487 int rc;
1488
1489 get_random_bytes(&vxlan_salt, sizeof(vxlan_salt));
1490
1491 rc = register_pernet_device(&vxlan_net_ops);
1492 if (rc)
1493 goto out1;
1494
1495 rc = rtnl_link_register(&vxlan_link_ops);
1496 if (rc)
1497 goto out2;
1498
1499 return 0;
1500
1501out2:
1502 unregister_pernet_device(&vxlan_net_ops);
1503out1:
1504 return rc;
1505}
1506module_init(vxlan_init_module);
1507
1508static void __exit vxlan_cleanup_module(void)
1509{
1510 rtnl_link_unregister(&vxlan_link_ops);
1511 unregister_pernet_device(&vxlan_net_ops);
1512}
1513module_exit(vxlan_cleanup_module);
1514
1515MODULE_LICENSE("GPL");
1516MODULE_VERSION(VXLAN_VERSION);
1517MODULE_AUTHOR("Stephen Hemminger <shemminger@vyatta.com>");
1518MODULE_ALIAS_RTNL_LINK("vxlan");