blob: 2c319dd27f29740c2605dc2d93bb24170f289f4d [file] [log] [blame]
// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ifb.c:

	The purpose of this driver is to provide a device that allows
	for sharing of resources:

	1) qdiscs/policies that are per device as opposed to system wide.
	ifb allows for a device which can be redirected to thus providing
	an impression of sharing.

	2) Allows for queueing incoming traffic for shaping instead of
	dropping.

	The original concept is based on what is known as the IMQ
	driver initially written by Martin Devera, later rewritten
	by Patrick McHardy and then maintained by Andre Correa.

	You need the tc action mirror or redirect to feed this device
	packets.

	Authors:	Jamal Hadi Salim (2005)

*/
25
26
Jamal Hadi Salim253af422006-01-08 22:34:25 -080027#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/init.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000032#include <linux/interrupt.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080033#include <linux/moduleparam.h>
Lukas Wunner42df6e12021-10-08 22:06:03 +020034#include <linux/netfilter_netdev.h>
Jeff Garzik6aa20a22006-09-13 13:24:59 -040035#include <net/pkt_sched.h>
Eric W. Biederman881d9662007-09-17 11:56:21 -070036#include <net/net_namespace.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080037
Jamal Hadi Salim253af422006-01-08 22:34:25 -080038#define TX_Q_LIMIT 32
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020039struct ifb_q_private {
40 struct net_device *dev;
Jamal Hadi Salim253af422006-01-08 22:34:25 -080041 struct tasklet_struct ifb_tasklet;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020042 int tasklet_pending;
43 int txqnum;
Jamal Hadi Salim253af422006-01-08 22:34:25 -080044 struct sk_buff_head rq;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020045 u64 rx_packets;
46 u64 rx_bytes;
47 struct u64_stats_sync rsync;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +000048
49 struct u64_stats_sync tsync;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020050 u64 tx_packets;
51 u64 tx_bytes;
Jamal Hadi Salim253af422006-01-08 22:34:25 -080052 struct sk_buff_head tq;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020053} ____cacheline_aligned_in_smp;
54
55struct ifb_dev_private {
56 struct ifb_q_private *tx_private;
Jamal Hadi Salim253af422006-01-08 22:34:25 -080057};
58
Stephen Hemminger424efe92009-08-31 19:50:51 +000059static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
Jamal Hadi Salim253af422006-01-08 22:34:25 -080060static int ifb_open(struct net_device *dev);
61static int ifb_close(struct net_device *dev);
62
Emil Renner Berthing08267522021-01-31 00:47:24 +010063static void ifb_ri_tasklet(struct tasklet_struct *t)
Jamal Hadi Salim253af422006-01-08 22:34:25 -080064{
Emil Renner Berthing08267522021-01-31 00:47:24 +010065 struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
David S. Millerc3f26a22008-07-31 16:58:50 -070066 struct netdev_queue *txq;
Jamal Hadi Salim253af422006-01-08 22:34:25 -080067 struct sk_buff *skb;
68
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020069 txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
70 skb = skb_peek(&txp->tq);
71 if (!skb) {
72 if (!__netif_tx_trylock(txq))
Jamal Hadi Salim253af422006-01-08 22:34:25 -080073 goto resched;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020074 skb_queue_splice_tail_init(&txp->rq, &txp->tq);
75 __netif_tx_unlock(txq);
Jamal Hadi Salim253af422006-01-08 22:34:25 -080076 }
77
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020078 while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
Lukas Wunner42df6e12021-10-08 22:06:03 +020079 /* Skip tc and netfilter to prevent redirection loop. */
Pablo Neira Ayuso2c646052020-03-25 13:47:18 +010080 skb->redirected = 0;
Willem de Bruijne7246e12017-01-07 17:06:35 -050081 skb->tc_skip_classify = 1;
Lukas Wunner42df6e12021-10-08 22:06:03 +020082 nf_skip_egress(skb, true);
stephen hemminger3b0c9cb2011-06-20 11:42:30 +000083
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020084 u64_stats_update_begin(&txp->tsync);
85 txp->tx_packets++;
86 txp->tx_bytes += skb->len;
87 u64_stats_update_end(&txp->tsync);
Patrick McHardyc01003c2007-03-29 11:46:52 -070088
Eric Dumazet05e86892009-11-01 19:45:16 +000089 rcu_read_lock();
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020090 skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
Patrick McHardyc01003c2007-03-29 11:46:52 -070091 if (!skb->dev) {
Eric Dumazet05e86892009-11-01 19:45:16 +000092 rcu_read_unlock();
Patrick McHardyc01003c2007-03-29 11:46:52 -070093 dev_kfree_skb(skb);
Eric Dumazet9e29e21a2015-07-06 22:05:28 +020094 txp->dev->stats.tx_dropped++;
95 if (skb_queue_len(&txp->tq) != 0)
Changli Gao75c1c822010-12-04 14:09:08 +000096 goto resched;
Patrick McHardyc01003c2007-03-29 11:46:52 -070097 break;
98 }
Eric Dumazet05e86892009-11-01 19:45:16 +000099 rcu_read_unlock();
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200100 skb->skb_iif = txp->dev->ifindex;
Patrick McHardyc01003c2007-03-29 11:46:52 -0700101
Pablo Neira Ayuso2c646052020-03-25 13:47:18 +0100102 if (!skb->from_ingress) {
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800103 dev_queue_xmit(skb);
Willem de Bruijnbc31c902017-01-07 17:06:38 -0500104 } else {
Jon Maxwellb1d2e4e2018-05-25 07:38:29 +1000105 skb_pull_rcsum(skb, skb->mac_len);
Eric Dumazet1a759722010-12-14 22:39:58 +0000106 netif_receive_skb(skb);
Willem de Bruijnbc31c902017-01-07 17:06:38 -0500107 }
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800108 }
109
David S. Millerc3f26a22008-07-31 16:58:50 -0700110 if (__netif_tx_trylock(txq)) {
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200111 skb = skb_peek(&txp->rq);
112 if (!skb) {
113 txp->tasklet_pending = 0;
114 if (netif_tx_queue_stopped(txq))
115 netif_tx_wake_queue(txq);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800116 } else {
David S. Millerc3f26a22008-07-31 16:58:50 -0700117 __netif_tx_unlock(txq);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800118 goto resched;
119 }
David S. Millerc3f26a22008-07-31 16:58:50 -0700120 __netif_tx_unlock(txq);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800121 } else {
122resched:
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200123 txp->tasklet_pending = 1;
124 tasklet_schedule(&txp->ifb_tasklet);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800125 }
126
127}
128
stephen hemmingerbc1f4472017-01-06 19:12:52 -0800129static void ifb_stats64(struct net_device *dev,
130 struct rtnl_link_stats64 *stats)
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000131{
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200132 struct ifb_dev_private *dp = netdev_priv(dev);
133 struct ifb_q_private *txp = dp->tx_private;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000134 unsigned int start;
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200135 u64 packets, bytes;
136 int i;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000137
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200138 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
139 do {
140 start = u64_stats_fetch_begin_irq(&txp->rsync);
141 packets = txp->rx_packets;
142 bytes = txp->rx_bytes;
143 } while (u64_stats_fetch_retry_irq(&txp->rsync, start));
144 stats->rx_packets += packets;
145 stats->rx_bytes += bytes;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000146
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200147 do {
148 start = u64_stats_fetch_begin_irq(&txp->tsync);
149 packets = txp->tx_packets;
150 bytes = txp->tx_bytes;
151 } while (u64_stats_fetch_retry_irq(&txp->tsync, start));
152 stats->tx_packets += packets;
153 stats->tx_bytes += bytes;
154 }
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000155 stats->rx_dropped = dev->stats.rx_dropped;
156 stats->tx_dropped = dev->stats.tx_dropped;
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000157}
158
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200159static int ifb_dev_init(struct net_device *dev)
160{
161 struct ifb_dev_private *dp = netdev_priv(dev);
162 struct ifb_q_private *txp;
163 int i;
164
165 txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
166 if (!txp)
167 return -ENOMEM;
168 dp->tx_private = txp;
169 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
170 txp->txqnum = i;
171 txp->dev = dev;
172 __skb_queue_head_init(&txp->rq);
173 __skb_queue_head_init(&txp->tq);
174 u64_stats_init(&txp->rsync);
175 u64_stats_init(&txp->tsync);
Emil Renner Berthing08267522021-01-31 00:47:24 +0100176 tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200177 netif_tx_start_queue(netdev_get_tx_queue(dev, i));
178 }
179 return 0;
180}
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000181
Stephen Hemminger8dfcdf32008-11-19 21:47:07 -0800182static const struct net_device_ops ifb_netdev_ops = {
Stephen Hemminger8dfcdf32008-11-19 21:47:07 -0800183 .ndo_open = ifb_open,
184 .ndo_stop = ifb_close,
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000185 .ndo_get_stats64 = ifb_stats64,
Stephen Hemminger00829822008-11-20 20:14:53 -0800186 .ndo_start_xmit = ifb_xmit,
187 .ndo_validate_addr = eth_validate_addr,
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200188 .ndo_init = ifb_dev_init,
Stephen Hemminger8dfcdf32008-11-19 21:47:07 -0800189};
190
Michał Mirosław34324dc2011-11-15 15:29:55 +0000191#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
Alexander Lobakinecb8fed2020-11-01 13:17:17 +0000192 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +0000193 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
194 NETIF_F_HW_VLAN_STAG_TX)
Eric Dumazet39980292011-01-03 10:35:22 +0000195
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200196static void ifb_dev_free(struct net_device *dev)
197{
198 struct ifb_dev_private *dp = netdev_priv(dev);
199 struct ifb_q_private *txp = dp->tx_private;
200 int i;
201
202 for (i = 0; i < dev->num_tx_queues; i++,txp++) {
203 tasklet_kill(&txp->ifb_tasklet);
204 __skb_queue_purge(&txp->rq);
205 __skb_queue_purge(&txp->tq);
206 }
207 kfree(dp->tx_private);
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200208}
209
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700210static void ifb_setup(struct net_device *dev)
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800211{
212 /* Initialize the device structure. */
Stephen Hemminger8dfcdf32008-11-19 21:47:07 -0800213 dev->netdev_ops = &ifb_netdev_ops;
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800214
215 /* Fill in device structure with ethernet-generic values. */
216 ether_setup(dev);
217 dev->tx_queue_len = TX_Q_LIMIT;
Stephen Hemminger8dfcdf32008-11-19 21:47:07 -0800218
Eric Dumazet39980292011-01-03 10:35:22 +0000219 dev->features |= IFB_FEATURES;
Eric Dumazet7d9457962016-05-06 18:19:59 -0700220 dev->hw_features |= dev->features;
221 dev->hw_enc_features |= dev->features;
Vlad Yasevich8dd6e142014-03-27 22:14:47 -0400222 dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
223 NETIF_F_HW_VLAN_STAG_TX);
Eric Dumazet39980292011-01-03 10:35:22 +0000224
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800225 dev->flags |= IFF_NOARP;
226 dev->flags &= ~IFF_MULTICAST;
Eric Dumazet02875872014-10-05 18:38:35 -0700227 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
228 netif_keep_dst(dev);
Danny Kukawkaf2cedb62012-02-15 06:45:39 +0000229 eth_hw_addr_random(dev);
David S. Millercf124db2017-05-08 12:52:56 -0400230 dev->needs_free_netdev = true;
231 dev->priv_destructor = ifb_dev_free;
Zhang Shengjue94cd812017-09-22 23:57:49 +0800232
233 dev->min_mtu = 0;
234 dev->max_mtu = 0;
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800235}
236
Stephen Hemminger424efe92009-08-31 19:50:51 +0000237static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800238{
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200239 struct ifb_dev_private *dp = netdev_priv(dev);
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200240 struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800241
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200242 u64_stats_update_begin(&txp->rsync);
243 txp->rx_packets++;
244 txp->rx_bytes += skb->len;
245 u64_stats_update_end(&txp->rsync);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800246
Pablo Neira Ayuso2c646052020-03-25 13:47:18 +0100247 if (!skb->redirected || !skb->skb_iif) {
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800248 dev_kfree_skb(skb);
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000249 dev->stats.rx_dropped++;
Stephen Hemminger424efe92009-08-31 19:50:51 +0000250 return NETDEV_TX_OK;
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800251 }
252
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200253 if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
254 netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800255
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200256 __skb_queue_tail(&txp->rq, skb);
257 if (!txp->tasklet_pending) {
258 txp->tasklet_pending = 1;
259 tasklet_schedule(&txp->ifb_tasklet);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800260 }
261
Stephen Hemminger424efe92009-08-31 19:50:51 +0000262 return NETDEV_TX_OK;
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800263}
264
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800265static int ifb_close(struct net_device *dev)
266{
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200267 netif_tx_stop_all_queues(dev);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800268 return 0;
269}
270
271static int ifb_open(struct net_device *dev)
272{
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200273 netif_tx_start_all_queues(dev);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800274 return 0;
275}
276
Matthias Schiffera8b8a8892017-06-25 23:56:01 +0200277static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
278 struct netlink_ext_ack *extack)
Patrick McHardy0e068772007-07-11 19:42:31 -0700279{
280 if (tb[IFLA_ADDRESS]) {
281 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
282 return -EINVAL;
283 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
284 return -EADDRNOTAVAIL;
285 }
286 return 0;
287}
288
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700289static struct rtnl_link_ops ifb_link_ops __read_mostly = {
290 .kind = "ifb",
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200291 .priv_size = sizeof(struct ifb_dev_private),
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700292 .setup = ifb_setup,
Patrick McHardy0e068772007-07-11 19:42:31 -0700293 .validate = ifb_validate,
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700294};
295
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200296/* Number of ifb devices to be set up by this module.
297 * Note that these legacy devices have one queue.
298 * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
299 */
300static int numifbs = 2;
Patrick McHardy2d85cba2007-07-11 19:42:13 -0700301module_param(numifbs, int, 0);
302MODULE_PARM_DESC(numifbs, "Number of ifb devices");
303
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800304static int __init ifb_init_one(int index)
305{
306 struct net_device *dev_ifb;
307 int err;
308
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200309 dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
Tom Gundersenc835a672014-07-14 16:37:24 +0200310 NET_NAME_UNKNOWN, ifb_setup);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800311
312 if (!dev_ifb)
313 return -ENOMEM;
314
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700315 dev_ifb->rtnl_link_ops = &ifb_link_ops;
316 err = register_netdevice(dev_ifb);
317 if (err < 0)
318 goto err;
Jarek Poplawski94833df2008-03-20 17:05:13 -0700319
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700320 return 0;
321
322err:
323 free_netdev(dev_ifb);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800324 return err;
325}
326
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800327static int __init ifb_init_module(void)
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400328{
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700329 int i, err;
330
Kirill Tkhai554873e2018-03-30 19:38:37 +0300331 down_write(&pernet_ops_rwsem);
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700332 rtnl_lock();
333 err = __rtnl_link_register(&ifb_link_ops);
dingtianhongf2966cd2013-07-11 19:04:06 +0800334 if (err < 0)
335 goto out;
Patrick McHardy62b7ffc2007-06-13 12:04:51 -0700336
dingtianhong440d57b2013-07-10 12:04:02 +0800337 for (i = 0; i < numifbs && !err; i++) {
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400338 err = ifb_init_one(i);
dingtianhong440d57b2013-07-10 12:04:02 +0800339 cond_resched();
340 }
Patrick McHardy2d85cba2007-07-11 19:42:13 -0700341 if (err)
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700342 __rtnl_link_unregister(&ifb_link_ops);
dingtianhongf2966cd2013-07-11 19:04:06 +0800343
344out:
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700345 rtnl_unlock();
Kirill Tkhai554873e2018-03-30 19:38:37 +0300346 up_write(&pernet_ops_rwsem);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800347
348 return err;
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400349}
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800350
351static void __exit ifb_cleanup_module(void)
352{
Patrick McHardy2d85cba2007-07-11 19:42:13 -0700353 rtnl_link_unregister(&ifb_link_ops);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800354}
355
356module_init(ifb_init_module);
357module_exit(ifb_cleanup_module);
358MODULE_LICENSE("GPL");
359MODULE_AUTHOR("Jamal Hadi Salim");
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700360MODULE_ALIAS_RTNL_LINK("ifb");