blob: 1c64d5347b8e05a1e31eff7eb2e94925265544fd [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002/* drivers/net/ifb.c:
Jamal Hadi Salim253af422006-01-08 22:34:25 -08003
4 The purpose of this driver is to provide a device that allows
5 for sharing of resources:
6
7 1) qdiscs/policies that are per device as opposed to system wide.
8 ifb allows for a device which can be redirected to thus providing
9 an impression of sharing.
10
11 2) Allows for queueing incoming traffic for shaping instead of
Jeff Garzik6aa20a22006-09-13 13:24:59 -040012 dropping.
13
Jamal Hadi Salim253af422006-01-08 22:34:25 -080014 The original concept is based on what is known as the IMQ
15 driver initially written by Martin Devera, later rewritten
16 by Patrick McHardy and then maintained by Andre Correa.
17
18 You need the tc action mirror or redirect to feed this device
Hui Tangcf9207d2021-05-20 11:47:53 +080019 packets.
Jamal Hadi Salim253af422006-01-08 22:34:25 -080020
Jeff Garzik6aa20a22006-09-13 13:24:59 -040021
Hui Tangcf9207d2021-05-20 11:47:53 +080022 Authors: Jamal Hadi Salim (2005)
Jeff Garzik6aa20a22006-09-13 13:24:59 -040023
Jamal Hadi Salim253af422006-01-08 22:34:25 -080024*/
25
26
Jamal Hadi Salim253af422006-01-08 22:34:25 -080027#include <linux/module.h>
28#include <linux/kernel.h>
29#include <linux/netdevice.h>
Tonghao Zhanga21ee5b2021-11-28 09:46:31 +080030#include <linux/ethtool.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080031#include <linux/etherdevice.h>
32#include <linux/init.h>
Alexey Dobriyana6b7a402011-06-06 10:43:46 +000033#include <linux/interrupt.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080034#include <linux/moduleparam.h>
Lukas Wunner42df6e12021-10-08 22:06:03 +020035#include <linux/netfilter_netdev.h>
Jeff Garzik6aa20a22006-09-13 13:24:59 -040036#include <net/pkt_sched.h>
Eric W. Biederman881d9662007-09-17 11:56:21 -070037#include <net/net_namespace.h>
Jamal Hadi Salim253af422006-01-08 22:34:25 -080038
Jamal Hadi Salim253af422006-01-08 22:34:25 -080039#define TX_Q_LIMIT 32
Tonghao Zhanga21ee5b2021-11-28 09:46:31 +080040
/* Per-queue packet/byte counters. The u64_stats_sync member lets
 * readers obtain a consistent 64-bit snapshot on 32-bit SMP hosts.
 */
struct ifb_q_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync sync;
};
46
/* Per-tx-queue state: one instance per netdev tx queue, cacheline
 * aligned so queues running on different CPUs do not false-share.
 */
struct ifb_q_private {
	struct net_device *dev;
	struct tasklet_struct ifb_tasklet;	/* drains rq/tq, see ifb_ri_tasklet() */
	int tasklet_pending;			/* nonzero while the tasklet is scheduled */
	int txqnum;				/* index of this tx queue on dev */
	struct sk_buff_head rq;			/* skbs queued by ifb_xmit() */
	struct sk_buff_head tq;			/* skbs currently being drained */
	struct ifb_q_stats rx_stats;		/* accounted on ifb_xmit() entry */
	struct ifb_q_stats tx_stats;		/* accounted when skb is re-injected */
} ____cacheline_aligned_in_smp;
57
/* netdev_priv() payload: array of per-queue state, one entry per tx queue,
 * allocated in ifb_dev_init() and freed in ifb_dev_free().
 */
struct ifb_dev_private {
	struct ifb_q_private *tx_private;
};
61
/* For ethtools stats. */
struct ifb_q_stats_desc {
	char desc[ETH_GSTRING_LEN];	/* stat name shown by ethtool -S */
	size_t offset;			/* field offset within struct ifb_q_stats */
};

#define IFB_Q_STAT(m) offsetof(struct ifb_q_stats, m)

/* One entry per exported counter; order defines the ethtool layout. */
static const struct ifb_q_stats_desc ifb_q_stats_desc[] = {
	{ "packets",	IFB_Q_STAT(packets) },
	{ "bytes",	IFB_Q_STAT(bytes) },
};

#define IFB_Q_STATS_LEN ARRAY_SIZE(ifb_q_stats_desc)
76
Stephen Hemminger424efe92009-08-31 19:50:51 +000077static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev);
Jamal Hadi Salim253af422006-01-08 22:34:25 -080078static int ifb_open(struct net_device *dev);
79static int ifb_close(struct net_device *dev);
80
/* Account one skb of @len bytes into @stats inside the u64_stats
 * write section, so 32-bit readers never see a torn counter pair.
 */
static void ifb_update_q_stats(struct ifb_q_stats *stats, int len)
{
	u64_stats_update_begin(&stats->sync);
	stats->packets++;
	stats->bytes += len;
	u64_stats_update_end(&stats->sync);
}
88
/* Tasklet that drains one queue's backlog: refills tq from rq under the
 * tx queue lock, then re-injects each skb either back into the receive
 * path (if it was redirected from ingress) or out via its original
 * device's transmit path.
 */
static void ifb_ri_tasklet(struct tasklet_struct *t)
{
	struct ifb_q_private *txp = from_tasklet(txp, t, ifb_tasklet);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(txp->dev, txp->txqnum);
	skb = skb_peek(&txp->tq);
	if (!skb) {
		/* tq is empty: splice the whole rq backlog over, but only
		 * if the tx lock can be taken without spinning.
		 */
		if (!__netif_tx_trylock(txq))
			goto resched;
		skb_queue_splice_tail_init(&txp->rq, &txp->tq);
		__netif_tx_unlock(txq);
	}

	while ((skb = __skb_dequeue(&txp->tq)) != NULL) {
		/* Skip tc and netfilter to prevent redirection loop. */
		skb->redirected = 0;
#ifdef CONFIG_NET_CLS_ACT
		skb->tc_skip_classify = 1;
#endif
		nf_skip_egress(skb, true);

		ifb_update_q_stats(&txp->tx_stats, skb->len);

		rcu_read_lock();
		/* Re-target the skb at the device it was redirected from */
		skb->dev = dev_get_by_index_rcu(dev_net(txp->dev), skb->skb_iif);
		if (!skb->dev) {
			/* original device disappeared: drop and account */
			rcu_read_unlock();
			dev_kfree_skb(skb);
			txp->dev->stats.tx_dropped++;
			if (skb_queue_len(&txp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = txp->dev->ifindex;

		if (!skb->from_ingress) {
			dev_queue_xmit(skb);
		} else {
			skb_pull_rcsum(skb, skb->mac_len);
			netif_receive_skb(skb);
		}
	}

	if (__netif_tx_trylock(txq)) {
		skb = skb_peek(&txp->rq);
		if (!skb) {
			/* fully drained: let ifb_xmit() schedule us anew */
			txp->tasklet_pending = 0;
			if (netif_tx_queue_stopped(txq))
				netif_tx_wake_queue(txq);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		/* lock contended or more work pending: run again later */
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

}
153
/* ndo_get_stats64: sum the per-queue rx/tx counters into @stats.
 * Each pair is read inside a u64_stats fetch/retry loop so the
 * packets/bytes snapshot is consistent on 32-bit hosts.
 */
static void ifb_stats64(struct net_device *dev,
			struct rtnl_link_stats64 *stats)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	unsigned int start;
	u64 packets, bytes;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
		do {
			start = u64_stats_fetch_begin_irq(&txp->rx_stats.sync);
			packets = txp->rx_stats.packets;
			bytes = txp->rx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&txp->rx_stats.sync, start));
		stats->rx_packets += packets;
		stats->rx_bytes += bytes;

		do {
			start = u64_stats_fetch_begin_irq(&txp->tx_stats.sync);
			packets = txp->tx_stats.packets;
			bytes = txp->tx_stats.bytes;
		} while (u64_stats_fetch_retry_irq(&txp->tx_stats.sync, start));
		stats->tx_packets += packets;
		stats->tx_bytes += bytes;
	}
	/* drop counters are kept in dev->stats directly, not per queue */
	stats->rx_dropped = dev->stats.rx_dropped;
	stats->tx_dropped = dev->stats.tx_dropped;
}
183
/* ndo_init: allocate one ifb_q_private per tx queue and wire up its
 * queues, stats syncs and tasklet. Returns 0 or -ENOMEM.
 */
static int ifb_dev_init(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp;
	int i;

	txp = kcalloc(dev->num_tx_queues, sizeof(*txp), GFP_KERNEL);
	if (!txp)
		return -ENOMEM;
	dp->tx_private = txp;
	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
		txp->txqnum = i;
		txp->dev = dev;
		__skb_queue_head_init(&txp->rq);
		__skb_queue_head_init(&txp->tq);
		u64_stats_init(&txp->rx_stats.sync);
		u64_stats_init(&txp->tx_stats.sync);
		tasklet_setup(&txp->ifb_tasklet, ifb_ri_tasklet);
		netif_tx_start_queue(netdev_get_tx_queue(dev, i));
	}
	return 0;
}
stephen hemminger3b0c9cb2011-06-20 11:42:30 +0000206
Tonghao Zhanga21ee5b2021-11-28 09:46:31 +0800207static void ifb_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
208{
209 u8 *p = buf;
210 int i, j;
211
212 switch (stringset) {
213 case ETH_SS_STATS:
214 for (i = 0; i < dev->real_num_rx_queues; i++)
215 for (j = 0; j < IFB_Q_STATS_LEN; j++)
216 ethtool_sprintf(&p, "rx_queue_%u_%.18s",
217 i, ifb_q_stats_desc[j].desc);
218
219 for (i = 0; i < dev->real_num_tx_queues; i++)
220 for (j = 0; j < IFB_Q_STATS_LEN; j++)
221 ethtool_sprintf(&p, "tx_queue_%u_%.18s",
222 i, ifb_q_stats_desc[j].desc);
223
224 break;
225 }
226}
227
228static int ifb_get_sset_count(struct net_device *dev, int sset)
229{
230 switch (sset) {
231 case ETH_SS_STATS:
232 return IFB_Q_STATS_LEN * (dev->real_num_rx_queues +
233 dev->real_num_tx_queues);
234 default:
235 return -EOPNOTSUPP;
236 }
237}
238
/* Copy one queue's counters into the ethtool data array, advancing
 * *data past the copied entries. The fetch/retry loop re-reads the
 * whole set if a writer raced with us.
 */
static void ifb_fill_stats_data(u64 **data,
				struct ifb_q_stats *q_stats)
{
	void *stats_base = (void *)q_stats;
	unsigned int start;
	size_t offset;
	int j;

	do {
		start = u64_stats_fetch_begin_irq(&q_stats->sync);
		for (j = 0; j < IFB_Q_STATS_LEN; j++) {
			/* counters are located via the offset table */
			offset = ifb_q_stats_desc[j].offset;
			(*data)[j] = *(u64 *)(stats_base + offset);
		}
	} while (u64_stats_fetch_retry_irq(&q_stats->sync, start));

	*data += IFB_Q_STATS_LEN;
}
257
258static void ifb_get_ethtool_stats(struct net_device *dev,
259 struct ethtool_stats *stats, u64 *data)
260{
261 struct ifb_dev_private *dp = netdev_priv(dev);
262 struct ifb_q_private *txp;
263 int i;
264
265 for (i = 0; i < dev->real_num_rx_queues; i++) {
266 txp = dp->tx_private + i;
267 ifb_fill_stats_data(&data, &txp->rx_stats);
268 }
269
270 for (i = 0; i < dev->real_num_tx_queues; i++) {
271 txp = dp->tx_private + i;
272 ifb_fill_stats_data(&data, &txp->tx_stats);
273 }
274}
275
/* netdev callbacks; ndo_init allocates the per-queue state. */
static const struct net_device_ops ifb_netdev_ops = {
	.ndo_open	= ifb_open,
	.ndo_stop	= ifb_close,
	.ndo_get_stats64 = ifb_stats64,
	.ndo_start_xmit	= ifb_xmit,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_init	= ifb_dev_init,
};
284
/* ethtool callbacks for the per-queue statistics (ethtool -S). */
static const struct ethtool_ops ifb_ethtool_ops = {
	.get_strings		= ifb_get_strings,
	.get_sset_count		= ifb_get_sset_count,
	.get_ethtool_stats	= ifb_get_ethtool_stats,
};
290
Michał Mirosław34324dc2011-11-15 15:29:55 +0000291#define IFB_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
Alexander Lobakinecb8fed2020-11-01 13:17:17 +0000292 NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL | \
Patrick McHardy28d2b132013-04-19 02:04:32 +0000293 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_CTAG_TX | \
294 NETIF_F_HW_VLAN_STAG_TX)
Eric Dumazet39980292011-01-03 10:35:22 +0000295
/* priv_destructor: stop each queue's tasklet, drop any skbs still
 * sitting in its queues, then free the per-queue array.
 */
static void ifb_dev_free(struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private;
	int i;

	for (i = 0; i < dev->num_tx_queues; i++,txp++) {
		tasklet_kill(&txp->ifb_tasklet);
		__skb_queue_purge(&txp->rq);
		__skb_queue_purge(&txp->tq);
	}
	kfree(dp->tx_private);
}
309
/* rtnl setup callback: configure an ifb netdev before registration. */
static void ifb_setup(struct net_device *dev)
{
	/* Initialize the device structure. */
	dev->netdev_ops = &ifb_netdev_ops;
	dev->ethtool_ops = &ifb_ethtool_ops;

	/* Fill in device structure with ethernet-generic values. */
	ether_setup(dev);
	dev->tx_queue_len = TX_Q_LIMIT;

	/* advertise the same offloads for plain, hw and encapsulated paths */
	dev->features |= IFB_FEATURES;
	dev->hw_features |= dev->features;
	dev->hw_enc_features |= dev->features;
	dev->vlan_features |= IFB_FEATURES & ~(NETIF_F_HW_VLAN_CTAG_TX |
					       NETIF_F_HW_VLAN_STAG_TX);

	/* virtual device: no ARP, no multicast, skbs must not be shared */
	dev->flags |= IFF_NOARP;
	dev->flags &= ~IFF_MULTICAST;
	dev->priv_flags &= ~IFF_TX_SKB_SHARING;
	netif_keep_dst(dev);
	eth_hw_addr_random(dev);
	dev->needs_free_netdev = true;
	dev->priv_destructor = ifb_dev_free;

	/* no MTU restrictions for a software-only device */
	dev->min_mtu = 0;
	dev->max_mtu = 0;
}
337
/* ndo_start_xmit: queue a redirected skb on the per-queue backlog and
 * kick the drain tasklet. Packets that were not redirected here (or
 * lack an originating ifindex) are dropped.
 */
static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ifb_dev_private *dp = netdev_priv(dev);
	struct ifb_q_private *txp = dp->tx_private + skb_get_queue_mapping(skb);

	ifb_update_q_stats(&txp->rx_stats, skb->len);

	if (!skb->redirected || !skb->skb_iif) {
		dev_kfree_skb(skb);
		dev->stats.rx_dropped++;
		return NETDEV_TX_OK;
	}

	/* apply backpressure once the backlog reaches the queue limit */
	if (skb_queue_len(&txp->rq) >= dev->tx_queue_len)
		netif_tx_stop_queue(netdev_get_tx_queue(dev, txp->txqnum));

	__skb_queue_tail(&txp->rq, skb);
	if (!txp->tasklet_pending) {
		txp->tasklet_pending = 1;
		tasklet_schedule(&txp->ifb_tasklet);
	}

	return NETDEV_TX_OK;
}
362
/* ndo_stop: stop all tx queues; queued skbs are freed in ifb_dev_free(). */
static int ifb_close(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	return 0;
}
368
/* ndo_open: enable all tx queues so redirected traffic can flow. */
static int ifb_open(struct net_device *dev)
{
	netif_tx_start_all_queues(dev);
	return 0;
}
374
Matthias Schiffera8b8a8892017-06-25 23:56:01 +0200375static int ifb_validate(struct nlattr *tb[], struct nlattr *data[],
376 struct netlink_ext_ack *extack)
Patrick McHardy0e068772007-07-11 19:42:31 -0700377{
378 if (tb[IFLA_ADDRESS]) {
379 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
380 return -EINVAL;
381 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
382 return -EADDRNOTAVAIL;
383 }
384 return 0;
385}
386
/* rtnl_link_ops so "ip link add ... type ifb" can create devices. */
static struct rtnl_link_ops ifb_link_ops __read_mostly = {
	.kind		= "ifb",
	.priv_size	= sizeof(struct ifb_dev_private),
	.setup		= ifb_setup,
	.validate	= ifb_validate,
};
393
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200394/* Number of ifb devices to be set up by this module.
395 * Note that these legacy devices have one queue.
396 * Prefer something like : ip link add ifb10 numtxqueues 8 type ifb
397 */
398static int numifbs = 2;
Patrick McHardy2d85cba2007-07-11 19:42:13 -0700399module_param(numifbs, int, 0);
400MODULE_PARM_DESC(numifbs, "Number of ifb devices");
401
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800402static int __init ifb_init_one(int index)
403{
404 struct net_device *dev_ifb;
405 int err;
406
Eric Dumazet9e29e21a2015-07-06 22:05:28 +0200407 dev_ifb = alloc_netdev(sizeof(struct ifb_dev_private), "ifb%d",
Tom Gundersenc835a672014-07-14 16:37:24 +0200408 NET_NAME_UNKNOWN, ifb_setup);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800409
410 if (!dev_ifb)
411 return -ENOMEM;
412
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700413 dev_ifb->rtnl_link_ops = &ifb_link_ops;
414 err = register_netdevice(dev_ifb);
415 if (err < 0)
416 goto err;
Jarek Poplawski94833df2008-03-20 17:05:13 -0700417
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700418 return 0;
419
420err:
421 free_netdev(dev_ifb);
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800422 return err;
423}
424
/* Module init: register the rtnl link ops and create the default
 * legacy devices. pernet_ops_rwsem is taken before rtnl_lock to
 * preserve the established lock ordering for __rtnl_link_register().
 */
static int __init ifb_init_module(void)
{
	int i, err;

	down_write(&pernet_ops_rwsem);
	rtnl_lock();
	err = __rtnl_link_register(&ifb_link_ops);
	if (err < 0)
		goto out;

	for (i = 0; i < numifbs && !err; i++) {
		err = ifb_init_one(i);
		cond_resched();
	}
	/* partial failure: unregister the link ops we just added */
	if (err)
		__rtnl_link_unregister(&ifb_link_ops);

out:
	rtnl_unlock();
	up_write(&pernet_ops_rwsem);

	return err;
}
Jamal Hadi Salim253af422006-01-08 22:34:25 -0800448
/* Module exit: unregistering the link ops also tears down all devices. */
static void __exit ifb_cleanup_module(void)
{
	rtnl_link_unregister(&ifb_link_ops);
}
453
454module_init(ifb_init_module);
455module_exit(ifb_cleanup_module);
456MODULE_LICENSE("GPL");
457MODULE_AUTHOR("Jamal Hadi Salim");
Patrick McHardy9ba2cd62007-06-13 12:05:06 -0700458MODULE_ALIAS_RTNL_LINK("ifb");