/* Copyright (c) 2014 Mahesh Bandewar <maheshb@google.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 */

#include "ipvlan.h"

void ipvlan_adjust_mtu(struct ipvl_dev *ipvlan, struct net_device *dev)
{
	ipvlan->dev->mtu = dev->mtu - ipvlan->mtu_adj;
}

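/* Switch the port between L2 and L3 operating mode. In L3 mode ARP is not
 * used on the slaves, so IFF_NOARP is set on every slave hanging off the
 * port when entering L3 mode and cleared again when leaving it.
 */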
void ipvlan_set_port_mode(struct ipvl_port *port, u32 nval)
{
	struct ipvl_dev *ipvlan;

	if (port->mode != nval) {
		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
			if (nval == IPVLAN_MODE_L3)
				ipvlan->dev->flags |= IFF_NOARP;
			else
				ipvlan->dev->flags &= ~IFF_NOARP;
		}
		port->mode = nval;
	}
}

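/* Allocate and attach an ipvlan port structure to a master device. The
 * master must be a non-loopback Ethernet device; the port starts out in
 * L3 mode with an empty slave list and address hash, and registers the
 * rx-handler that steers ingress frames to the right slave.
 */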
static int ipvlan_port_create(struct net_device *dev)
{
	struct ipvl_port *port;
	int err, idx;

	if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) {
		netdev_err(dev, "Master is either lo or non-ether device\n");
		return -EINVAL;
	}
	port = kzalloc(sizeof(struct ipvl_port), GFP_KERNEL);
	if (!port)
		return -ENOMEM;

	port->dev = dev;
	port->mode = IPVLAN_MODE_L3;
	INIT_LIST_HEAD(&port->ipvlans);
	for (idx = 0; idx < IPVLAN_HASH_SIZE; idx++)
		INIT_HLIST_HEAD(&port->hlhead[idx]);

	err = netdev_rx_handler_register(dev, ipvlan_handle_frame, port);
	if (err)
		goto err;

	dev->priv_flags |= IFF_IPVLAN_MASTER;
	return 0;

err:
	kfree_rcu(port, rcu);
	return err;
}

static void ipvlan_port_destroy(struct net_device *dev)
{
	struct ipvl_port *port = ipvlan_port_get_rtnl(dev);

	dev->priv_flags &= ~IFF_IPVLAN_MASTER;
	netdev_rx_handler_unregister(dev);
	kfree_rcu(port, rcu);
}

/* ipvlan network devices have devices nesting below them and are a special
 * "super class" of normal network devices; split their locks off into a
 * separate class since they always nest.
 */
static struct lock_class_key ipvlan_netdev_xmit_lock_key;
static struct lock_class_key ipvlan_netdev_addr_lock_key;

#define IPVLAN_FEATURES \
	(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
	 NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
	 NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \
	 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_VLAN_STAG_FILTER)

#define IPVLAN_STATE_MASK \
	((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT))

static void ipvlan_set_lockdep_class_one(struct net_device *dev,
					 struct netdev_queue *txq,
					 void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &ipvlan_netdev_xmit_lock_key);
}

static void ipvlan_set_lockdep_class(struct net_device *dev)
{
	lockdep_set_class(&dev->addr_list_lock, &ipvlan_netdev_addr_lock_key);
	netdev_for_each_tx_queue(dev, ipvlan_set_lockdep_class_one, NULL);
}

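/* ndo_init: inherit link state, features, GSO limits and header length
 * from the physical device and allocate the per-cpu stats block.
 */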
static int ipvlan_init(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	const struct net_device *phy_dev = ipvlan->phy_dev;

	dev->state = (dev->state & ~IPVLAN_STATE_MASK) |
		     (phy_dev->state & IPVLAN_STATE_MASK);
	dev->features = phy_dev->features & IPVLAN_FEATURES;
	dev->features |= NETIF_F_LLTX;
	dev->gso_max_size = phy_dev->gso_max_size;
	dev->iflink = phy_dev->ifindex;
	dev->hard_header_len = phy_dev->hard_header_len;

	ipvlan_set_lockdep_class(dev);

	ipvlan->pcpu_stats = alloc_percpu(struct ipvl_pcpu_stats);
	if (!ipvlan->pcpu_stats)
		return -ENOMEM;

	return 0;
}

static void ipvlan_uninit(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan->port;

	free_percpu(ipvlan->pcpu_stats);

	port->count -= 1;
	if (!port->count)
		ipvlan_port_destroy(port->dev);
}

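/* ndo_open: set the ARP behaviour for the current port mode, re-insert any
 * configured addresses into the port's lookup hash and add the physical
 * device's own address to its unicast filter list.
 */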
static int ipvlan_open(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;
	struct ipvl_addr *addr;

	if (ipvlan->port->mode == IPVLAN_MODE_L3)
		dev->flags |= IFF_NOARP;
	else
		dev->flags &= ~IFF_NOARP;

	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
		list_for_each_entry(addr, &ipvlan->addrs, anode)
			ipvlan_ht_addr_add(ipvlan, addr);
	}
	return dev_uc_add(phy_dev, phy_dev->dev_addr);
}

static int ipvlan_stop(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;
	struct ipvl_addr *addr;

	dev_uc_unsync(phy_dev, dev);
	dev_mc_unsync(phy_dev, dev);

	dev_uc_del(phy_dev, phy_dev->dev_addr);

	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
		list_for_each_entry(addr, &ipvlan->addrs, anode)
			ipvlan_ht_addr_del(addr, !dev->dismantle);
	}
	return 0;
}

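/* ndo_start_xmit: hand the skb to the ipvlan xmit path and account the
 * result in the per-cpu stats (tx_pkts/tx_bytes on success or congestion,
 * tx_drps otherwise). The length is sampled before queueing since the skb
 * may already be gone when the call returns.
 */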
static netdev_tx_t ipvlan_start_xmit(struct sk_buff *skb,
				     struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	int skblen = skb->len;
	int ret;

	ret = ipvlan_queue_xmit(skb, dev);
	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct ipvl_pcpu_stats *pcptr;

		pcptr = this_cpu_ptr(ipvlan->pcpu_stats);

		u64_stats_update_begin(&pcptr->syncp);
		pcptr->tx_pkts++;
		pcptr->tx_bytes += skblen;
		u64_stats_update_end(&pcptr->syncp);
	} else {
		this_cpu_inc(ipvlan->pcpu_stats->tx_drps);
	}
	return ret;
}

static netdev_features_t ipvlan_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	return features & (ipvlan->sfeatures | ~IPVLAN_FEATURES);
}

static void ipvlan_change_rx_flags(struct net_device *dev, int change)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;

	if (change & IFF_ALLMULTI)
		dev_set_allmulti(phy_dev, dev->flags & IFF_ALLMULTI ? 1 : -1);
}

static void ipvlan_set_broadcast_mac_filter(struct ipvl_dev *ipvlan, bool set)
{
	struct net_device *dev = ipvlan->dev;
	unsigned int hashbit = ipvlan_mac_hash(dev->broadcast);

	if (set && !test_bit(hashbit, ipvlan->mac_filters))
		__set_bit(hashbit, ipvlan->mac_filters);
	else if (!set && test_bit(hashbit, ipvlan->mac_filters))
		__clear_bit(hashbit, ipvlan->mac_filters);
}

static void ipvlan_set_multicast_mac_filter(struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		bitmap_fill(ipvlan->mac_filters, IPVLAN_MAC_FILTER_SIZE);
	} else {
		struct netdev_hw_addr *ha;
		DECLARE_BITMAP(mc_filters, IPVLAN_MAC_FILTER_SIZE);

		bitmap_zero(mc_filters, IPVLAN_MAC_FILTER_SIZE);
		netdev_for_each_mc_addr(ha, dev)
			__set_bit(ipvlan_mac_hash(ha->addr), mc_filters);

		bitmap_copy(ipvlan->mac_filters, mc_filters,
			    IPVLAN_MAC_FILTER_SIZE);
	}
	dev_uc_sync(ipvlan->phy_dev, dev);
	dev_mc_sync(ipvlan->phy_dev, dev);
}

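/* ndo_get_stats64: fold the per-cpu counters into the rtnl_link_stats64
 * reply. The 64-bit counters are read under the u64_stats seqcount; the
 * 32-bit error/drop counters are read without it, as noted below.
 */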
static struct rtnl_link_stats64 *ipvlan_get_stats64(struct net_device *dev,
						    struct rtnl_link_stats64 *s)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	if (ipvlan->pcpu_stats) {
		struct ipvl_pcpu_stats *pcptr;
		u64 rx_pkts, rx_bytes, rx_mcast, tx_pkts, tx_bytes;
		u32 rx_errs = 0, tx_drps = 0;
		u32 strt;
		int idx;

		for_each_possible_cpu(idx) {
			pcptr = per_cpu_ptr(ipvlan->pcpu_stats, idx);
			do {
				strt = u64_stats_fetch_begin_irq(&pcptr->syncp);
				rx_pkts = pcptr->rx_pkts;
				rx_bytes = pcptr->rx_bytes;
				rx_mcast = pcptr->rx_mcast;
				tx_pkts = pcptr->tx_pkts;
				tx_bytes = pcptr->tx_bytes;
			} while (u64_stats_fetch_retry_irq(&pcptr->syncp,
							   strt));

			s->rx_packets += rx_pkts;
			s->rx_bytes += rx_bytes;
			s->multicast += rx_mcast;
			s->tx_packets += tx_pkts;
			s->tx_bytes += tx_bytes;

			/* u32 values are updated without syncp protection. */
			rx_errs += pcptr->rx_errs;
			tx_drps += pcptr->tx_drps;
		}
		s->rx_errors = rx_errs;
		s->rx_dropped = rx_errs;
		s->tx_dropped = tx_drps;
	}
	return s;
}

static int ipvlan_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;

	return vlan_vid_add(phy_dev, proto, vid);
}

static int ipvlan_vlan_rx_kill_vid(struct net_device *dev, __be16 proto,
				   u16 vid)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;

	vlan_vid_del(phy_dev, proto, vid);
	return 0;
}

static const struct net_device_ops ipvlan_netdev_ops = {
	.ndo_init		= ipvlan_init,
	.ndo_uninit		= ipvlan_uninit,
	.ndo_open		= ipvlan_open,
	.ndo_stop		= ipvlan_stop,
	.ndo_start_xmit		= ipvlan_start_xmit,
	.ndo_fix_features	= ipvlan_fix_features,
	.ndo_change_rx_flags	= ipvlan_change_rx_flags,
	.ndo_set_rx_mode	= ipvlan_set_multicast_mac_filter,
	.ndo_get_stats64	= ipvlan_get_stats64,
	.ndo_vlan_rx_add_vid	= ipvlan_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ipvlan_vlan_rx_kill_vid,
};

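/* Header creation is delegated to the physical device so the frame carries
 * the master's link-layer parameters; the slave's dev_addr is used as the
 * source when none is supplied.
 */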
static int ipvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
			      unsigned short type, const void *daddr,
			      const void *saddr, unsigned len)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct net_device *phy_dev = ipvlan->phy_dev;

	/* TODO Probably use a different field than dev_addr so that the
	 * mac-address on the virtual device is portable and can be carried
	 * while the packets use the mac-addr on the physical device.
	 */
	return dev_hard_header(skb, phy_dev, type, daddr,
			       saddr ? : dev->dev_addr, len);
}

static const struct header_ops ipvlan_header_ops = {
	.create		= ipvlan_hard_header,
	.rebuild	= eth_rebuild_header,
	.parse		= eth_header_parse,
	.cache		= eth_header_cache,
	.cache_update	= eth_header_cache_update,
};

static int ipvlan_ethtool_get_settings(struct net_device *dev,
				       struct ethtool_cmd *cmd)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);

	return __ethtool_get_settings(ipvlan->phy_dev, cmd);
}

static void ipvlan_ethtool_get_drvinfo(struct net_device *dev,
				       struct ethtool_drvinfo *drvinfo)
{
	strlcpy(drvinfo->driver, IPVLAN_DRV, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, IPV_DRV_VER, sizeof(drvinfo->version));
}

static u32 ipvlan_ethtool_get_msglevel(struct net_device *dev)
{
	const struct ipvl_dev *ipvlan = netdev_priv(dev);

	return ipvlan->msg_enable;
}

static void ipvlan_ethtool_set_msglevel(struct net_device *dev, u32 value)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	ipvlan->msg_enable = value;
}

static const struct ethtool_ops ipvlan_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.get_settings	= ipvlan_ethtool_get_settings,
	.get_drvinfo	= ipvlan_ethtool_get_drvinfo,
	.get_msglevel	= ipvlan_ethtool_get_msglevel,
	.set_msglevel	= ipvlan_ethtool_set_msglevel,
};

static int ipvlan_nl_changelink(struct net_device *dev,
				struct nlattr *tb[], struct nlattr *data[])
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);

	if (data && data[IFLA_IPVLAN_MODE]) {
		u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

		ipvlan_set_port_mode(port, nmode);
	}
	return 0;
}

static size_t ipvlan_nl_getsize(const struct net_device *dev)
{
	return (0
		+ nla_total_size(2) /* IFLA_IPVLAN_MODE */
		);
}

static int ipvlan_nl_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (data && data[IFLA_IPVLAN_MODE]) {
		u16 mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

		if (mode < IPVLAN_MODE_L2 || mode >= IPVLAN_MODE_MAX)
			return -EINVAL;
	}
	return 0;
}

static int ipvlan_nl_fillinfo(struct sk_buff *skb,
			      const struct net_device *dev)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port = ipvlan_port_get_rtnl(ipvlan->phy_dev);
	int ret = -EINVAL;

	if (!port)
		goto err;

	ret = -EMSGSIZE;
	if (nla_put_u16(skb, IFLA_IPVLAN_MODE, port->mode))
		goto err;

	return 0;

err:
	return ret;
}

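/* newlink: attach a new ipvlan slave to the physical device named by
 * IFLA_LINK. The first slave on a master creates the port; stacking on top
 * of another ipvlan slave is flattened onto that slave's master. The slave
 * inherits the master's MAC address and is linked into the port's slave
 * list once registration succeeds.
 */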
static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
			   struct nlattr *tb[], struct nlattr *data[])
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_port *port;
	struct net_device *phy_dev;
	int err;

	if (!tb[IFLA_LINK])
		return -EINVAL;

	phy_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
	if (!phy_dev)
		return -ENODEV;

	if (ipvlan_dev_slave(phy_dev)) {
		struct ipvl_dev *tmp = netdev_priv(phy_dev);

		phy_dev = tmp->phy_dev;
	} else if (!ipvlan_dev_master(phy_dev)) {
		err = ipvlan_port_create(phy_dev);
		if (err < 0)
			return err;
	}

	port = ipvlan_port_get_rtnl(phy_dev);
	if (data && data[IFLA_IPVLAN_MODE])
		port->mode = nla_get_u16(data[IFLA_IPVLAN_MODE]);

	ipvlan->phy_dev = phy_dev;
	ipvlan->dev = dev;
	ipvlan->port = port;
	ipvlan->sfeatures = IPVLAN_FEATURES;
	INIT_LIST_HEAD(&ipvlan->addrs);
	ipvlan->ipv4cnt = 0;
	ipvlan->ipv6cnt = 0;

	/* TODO Probably put random address here to be presented to the
	 * world but keep using the physical-dev address for the outgoing
	 * packets.
	 */
	memcpy(dev->dev_addr, phy_dev->dev_addr, ETH_ALEN);

	dev->priv_flags |= IFF_IPVLAN_SLAVE;

	port->count += 1;
	err = register_netdevice(dev);
	if (err < 0)
		goto ipvlan_destroy_port;

	err = netdev_upper_dev_link(phy_dev, dev);
	if (err)
		goto ipvlan_destroy_port;

	list_add_tail_rcu(&ipvlan->pnode, &port->ipvlans);
	netif_stacked_transfer_operstate(phy_dev, dev);
	return 0;

ipvlan_destroy_port:
	port->count -= 1;
	if (!port->count)
		ipvlan_port_destroy(phy_dev);

	return err;
}

static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
{
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct ipvl_addr *addr, *next;

	if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) {
		list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
			ipvlan_ht_addr_del(addr, !dev->dismantle);
			list_del_rcu(&addr->anode);
		}
	}
	list_del_rcu(&ipvlan->pnode);
	unregister_netdevice_queue(dev, head);
	netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
}

static void ipvlan_link_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
	dev->priv_flags |= IFF_UNICAST_FLT;
	dev->netdev_ops = &ipvlan_netdev_ops;
	dev->destructor = free_netdev;
	dev->header_ops = &ipvlan_header_ops;
	dev->ethtool_ops = &ipvlan_ethtool_ops;
	dev->tx_queue_len = 0;
}

static const struct nla_policy ipvlan_nl_policy[IFLA_IPVLAN_MAX + 1] =
{
	[IFLA_IPVLAN_MODE] = { .type = NLA_U16 },
};

static struct rtnl_link_ops ipvlan_link_ops = {
	.kind		= "ipvlan",
	.priv_size	= sizeof(struct ipvl_dev),

	.get_size	= ipvlan_nl_getsize,
	.policy		= ipvlan_nl_policy,
	.validate	= ipvlan_nl_validate,
	.fill_info	= ipvlan_nl_fillinfo,
	.changelink	= ipvlan_nl_changelink,
	.maxtype	= IFLA_IPVLAN_MAX,

	.setup		= ipvlan_link_setup,
	.newlink	= ipvlan_link_new,
	.dellink	= ipvlan_link_delete,
};

static int ipvlan_link_register(struct rtnl_link_ops *ops)
{
	return rtnl_link_register(ops);
}

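/* Netdevice notifier for the master device: propagate operstate, feature,
 * GSO and MTU changes to all slaves, tear the slaves down when the master
 * is unregistered, and refuse any attempt to change the master's type.
 */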
static int ipvlan_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct ipvl_dev *ipvlan, *next;
	struct ipvl_port *port;
	LIST_HEAD(lst_kill);

	if (!ipvlan_dev_master(dev))
		return NOTIFY_DONE;

	port = ipvlan_port_get_rtnl(dev);

	switch (event) {
	case NETDEV_CHANGE:
		list_for_each_entry(ipvlan, &port->ipvlans, pnode)
			netif_stacked_transfer_operstate(ipvlan->phy_dev,
							 ipvlan->dev);
		break;

	case NETDEV_UNREGISTER:
		if (dev->reg_state != NETREG_UNREGISTERING)
			break;

		list_for_each_entry_safe(ipvlan, next, &port->ipvlans,
					 pnode)
			ipvlan->dev->rtnl_link_ops->dellink(ipvlan->dev,
							    &lst_kill);
		unregister_netdevice_many(&lst_kill);
		break;

	case NETDEV_FEAT_CHANGE:
		list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
			ipvlan->dev->features = dev->features & IPVLAN_FEATURES;
			ipvlan->dev->gso_max_size = dev->gso_max_size;
			netdev_features_change(ipvlan->dev);
		}
		break;

	case NETDEV_CHANGEMTU:
		list_for_each_entry(ipvlan, &port->ipvlans, pnode)
			ipvlan_adjust_mtu(ipvlan, dev);
		break;

	case NETDEV_PRE_TYPE_CHANGE:
		/* Forbid underlying device to change its type. */
		return NOTIFY_BAD;
	}
	return NOTIFY_DONE;
}

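/* Track an IPv6 address configured on an ipvlan slave: reject an address
 * that is already in use, otherwise record it on the slave and insert it
 * into the port's address hash used for ingress demux.
 */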
static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
	struct ipvl_addr *addr;

	if (ipvlan_addr_busy(ipvlan, ip6_addr, true)) {
		netif_err(ipvlan, ifup, ipvlan->dev,
			  "Failed to add IPv6=%pI6c addr for %s intf\n",
			  ip6_addr, ipvlan->dev->name);
		return -EINVAL;
	}
	addr = kzalloc(sizeof(struct ipvl_addr), GFP_ATOMIC);
	if (!addr)
		return -ENOMEM;

	addr->master = ipvlan;
	memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
	addr->atype = IPVL_IPV6;
	list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
	ipvlan->ipv6cnt++;
	ipvlan_ht_addr_add(ipvlan, addr);

	return 0;
}

static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
{
	struct ipvl_addr *addr;

	addr = ipvlan_ht_addr_lookup(ipvlan->port, ip6_addr, true);
	if (!addr)
		return;

	ipvlan_ht_addr_del(addr, true);
	list_del_rcu(&addr->anode);
	ipvlan->ipv6cnt--;
	WARN_ON(ipvlan->ipv6cnt < 0);
	kfree_rcu(addr, rcu);

	return;
}

static int ipvlan_addr6_event(struct notifier_block *unused,
			      unsigned long event, void *ptr)
{
	struct inet6_ifaddr *if6 = (struct inet6_ifaddr *)ptr;
	struct net_device *dev = (struct net_device *)if6->idev->dev;
	struct ipvl_dev *ipvlan = netdev_priv(dev);

	if (!ipvlan_dev_slave(dev))
		return NOTIFY_DONE;

	if (!ipvlan || !ipvlan->port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		if (ipvlan_add_addr6(ipvlan, &if6->addr))
			return NOTIFY_BAD;
		break;

	case NETDEV_DOWN:
		ipvlan_del_addr6(ipvlan, &if6->addr);
		break;
	}

	return NOTIFY_OK;
}

static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
	struct ipvl_addr *addr;

	if (ipvlan_addr_busy(ipvlan, ip4_addr, false)) {
		netif_err(ipvlan, ifup, ipvlan->dev,
			  "Failed to add IPv4=%pI4 on %s intf.\n",
			  ip4_addr, ipvlan->dev->name);
		return -EINVAL;
	}
	addr = kzalloc(sizeof(struct ipvl_addr), GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	addr->master = ipvlan;
	memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
	addr->atype = IPVL_IPV4;
	list_add_tail_rcu(&addr->anode, &ipvlan->addrs);
	ipvlan->ipv4cnt++;
	ipvlan_ht_addr_add(ipvlan, addr);
	ipvlan_set_broadcast_mac_filter(ipvlan, true);

	return 0;
}

static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
{
	struct ipvl_addr *addr;

	addr = ipvlan_ht_addr_lookup(ipvlan->port, ip4_addr, false);
	if (!addr)
		return;

	ipvlan_ht_addr_del(addr, true);
	list_del_rcu(&addr->anode);
	ipvlan->ipv4cnt--;
	WARN_ON(ipvlan->ipv4cnt < 0);
	if (!ipvlan->ipv4cnt)
		ipvlan_set_broadcast_mac_filter(ipvlan, false);
	kfree_rcu(addr, rcu);

	return;
}

static int ipvlan_addr4_event(struct notifier_block *unused,
			      unsigned long event, void *ptr)
{
	struct in_ifaddr *if4 = (struct in_ifaddr *)ptr;
	struct net_device *dev = (struct net_device *)if4->ifa_dev->dev;
	struct ipvl_dev *ipvlan = netdev_priv(dev);
	struct in_addr ip4_addr;

	if (!ipvlan_dev_slave(dev))
		return NOTIFY_DONE;

	if (!ipvlan || !ipvlan->port)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_UP:
		ip4_addr.s_addr = if4->ifa_address;
		if (ipvlan_add_addr4(ipvlan, &ip4_addr))
			return NOTIFY_BAD;
		break;

	case NETDEV_DOWN:
		ip4_addr.s_addr = if4->ifa_address;
		ipvlan_del_addr4(ipvlan, &ip4_addr);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block ipvlan_addr4_notifier_block __read_mostly = {
	.notifier_call = ipvlan_addr4_event,
};

static struct notifier_block ipvlan_notifier_block __read_mostly = {
	.notifier_call = ipvlan_device_event,
};

static struct notifier_block ipvlan_addr6_notifier_block __read_mostly = {
	.notifier_call = ipvlan_addr6_event,
};

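/* Module init: seed the hashing secret, register the netdev and inet/inet6
 * address notifiers, then register the rtnl link ops; everything is
 * unwound in reverse if link registration fails.
 */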
static int __init ipvlan_init_module(void)
{
	int err;

	ipvlan_init_secret();
	register_netdevice_notifier(&ipvlan_notifier_block);
	register_inet6addr_notifier(&ipvlan_addr6_notifier_block);
	register_inetaddr_notifier(&ipvlan_addr4_notifier_block);

	err = ipvlan_link_register(&ipvlan_link_ops);
	if (err < 0)
		goto error;

	return 0;
error:
	unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
	unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
	unregister_netdevice_notifier(&ipvlan_notifier_block);
	return err;
}

static void __exit ipvlan_cleanup_module(void)
{
	rtnl_link_unregister(&ipvlan_link_ops);
	unregister_netdevice_notifier(&ipvlan_notifier_block);
	unregister_inetaddr_notifier(&ipvlan_addr4_notifier_block);
	unregister_inet6addr_notifier(&ipvlan_addr6_notifier_block);
}

module_init(ipvlan_init_module);
module_exit(ipvlan_cleanup_module);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Mahesh Bandewar <maheshb@google.com>");
MODULE_DESCRIPTION("Driver for L3 (IPv6/IPv4) based VLANs");
MODULE_ALIAS_RTNL_LINK("ipvlan");