blob: 2f1f0a3784083088bf9cfeb3b4c84e1391fb9e54 [file] [log] [blame]
Thomas Gleixner2874c5f2019-05-27 08:55:01 +02001// SPDX-License-Identifier: GPL-2.0-or-later
Linus Torvalds1da177e2005-04-16 15:20:36 -07002/* net/sched/sch_teql.c "True" (or "trivial") link equalizer.
3 *
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
5 */
6
7#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07008#include <linux/types.h>
9#include <linux/kernel.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090010#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/string.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070012#include <linux/errno.h>
Arnaldo Carvalho de Melo14c85022005-12-27 02:43:12 -020013#include <linux/if_arp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/netdevice.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015#include <linux/init.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/skbuff.h>
17#include <linux/moduleparam.h>
Patrick McHardy0ba48052007-07-02 22:49:07 -070018#include <net/dst.h>
19#include <net/neighbour.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <net/pkt_sched.h>
21
22/*
   How to set it up.
   -----------------
25
26 After loading this module you will find a new device teqlN
27 and new qdisc with the same name. To join a slave to the equalizer
28 you should just set this qdisc on a device f.e.
29
30 # tc qdisc add dev eth0 root teql0
31 # tc qdisc add dev eth1 root teql0
32
33 That's all. Full PnP 8)
34
35 Applicability.
36 --------------
37
38 1. Slave devices MUST be active devices, i.e., they must raise the tbusy
39 signal and generate EOI events. If you want to equalize virtual devices
40 like tunnels, use a normal eql device.
41 2. This device puts no limitations on physical slave characteristics
42 f.e. it will equalize 9600baud line and 100Mb ethernet perfectly :-)
   Certainly, large difference in link speeds will make the resulting
   equalized link unusable, because of huge packet reordering.
   I estimate an upper useful difference as ~10 times.
46 3. If the slave requires address resolution, only protocols using
47 neighbour cache (IPv4/IPv6) will work over the equalized link.
48 Other protocols are still allowed to use the slave device directly,
49 which will not break load balancing, though native slave
50 traffic will have the highest priority. */
51
/* One instance per "teqlN" master: pairs the virtual net_device with the
 * Qdisc_ops registered under the same name, and anchors the circular list
 * of slave qdiscs attached to it.
 */
struct teql_master {
	struct Qdisc_ops qops;		/* per-master qdisc ops; id set to dev name in teql_init() */
	struct net_device *dev;		/* the virtual master device */
	struct Qdisc *slaves;		/* circular list of slave qdiscs, NULL when empty */
	struct list_head master_list;	/* link on the global master_dev_list */
	unsigned long tx_bytes;		/* software tx counters, reported by */
	unsigned long tx_packets;	/* teql_master_stats64() */
	unsigned long tx_errors;
	unsigned long tx_dropped;
};
62
/* Per-slave qdisc private data: position in the master's circular slave
 * list, backpointer to the master, and the slave-local packet queue.
 */
struct teql_sched_data {
	struct Qdisc *next;	/* next slave in the master's circular list */
	struct teql_master *m;	/* owning master; assigned in teql_qdisc_init() */
	struct sk_buff_head q;	/* packets queued on this slave */
};
68
/* Step to the next slave in the master's circular list via the slave's
 * private data.
 */
#define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next)

/* Device flags the master mirrors from its slaves. */
#define FMASK (IFF_BROADCAST | IFF_POINTOPOINT)
Linus Torvalds1da177e2005-04-16 15:20:36 -070072
73/* "teql*" qdisc routines */
74
75static int
Eric Dumazet520ac302016-06-21 23:16:49 -070076teql_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
Linus Torvalds1da177e2005-04-16 15:20:36 -070077{
David S. Miller5ce2d482008-07-08 17:06:30 -070078 struct net_device *dev = qdisc_dev(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -070079 struct teql_sched_data *q = qdisc_priv(sch);
80
Krishna Kumar4cd8c9e2007-05-08 18:57:50 -070081 if (q->q.qlen < dev->tx_queue_len) {
82 __skb_queue_tail(&q->q, skb);
Ben Greear9871e502010-08-10 01:45:40 -070083 return NET_XMIT_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -070084 }
85
Eric Dumazet520ac302016-06-21 23:16:49 -070086 return qdisc_drop(skb, sch, to_free);
Linus Torvalds1da177e2005-04-16 15:20:36 -070087}
88
static struct sk_buff *
teql_dequeue(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);
	struct netdev_queue *dat_queue;
	struct sk_buff *skb;
	struct Qdisc *q;

	/* Take one packet from this slave's local queue, then look at the
	 * master device's (single) tx queue qdisc.
	 */
	skb = __skb_dequeue(&dat->q);
	dat_queue = netdev_get_tx_queue(dat->m->dev, 0);
	q = rcu_dereference_bh(dat_queue->qdisc);

	if (skb == NULL) {
		/* Local queue drained: make this slave the next round-robin
		 * candidate and wake the master device's queue.
		 */
		struct net_device *m = qdisc_dev(q);
		if (m) {
			dat->m->slaves = sch;
			netif_wake_queue(m);
		}
	} else {
		qdisc_bstats_update(sch, skb);
	}
	/* Keep sch->q.qlen in sync with both our queue and the master's. */
	sch->q.qlen = dat->q.qlen + q->q.qlen;
	return skb;
}
113
/* ->peek: nothing to peek — teql is meant to be used as a root qdisc,
 * so no parent ever peeks into it.
 */
static struct sk_buff *
teql_peek(struct Qdisc *sch)
{
	/* teql is meant to be used as root qdisc */
	return NULL;
}
120
/* ->reset: discard all packets queued on this slave and zero the
 * externally visible queue length.
 */
static void
teql_reset(struct Qdisc *sch)
{
	struct teql_sched_data *dat = qdisc_priv(sch);

	skb_queue_purge(&dat->q);
	sch->q.qlen = 0;
}
129
130static void
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000131teql_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700132{
133 struct Qdisc *q, *prev;
134 struct teql_sched_data *dat = qdisc_priv(sch);
135 struct teql_master *master = dat->m;
136
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000137 prev = master->slaves;
138 if (prev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700139 do {
140 q = NEXT_SLAVE(prev);
141 if (q == sch) {
142 NEXT_SLAVE(prev) = NEXT_SLAVE(q);
143 if (q == master->slaves) {
144 master->slaves = NEXT_SLAVE(q);
145 if (q == master->slaves) {
David S. Millere8a04642008-07-17 00:34:19 -0700146 struct netdev_queue *txq;
David S. Miller838740002008-07-17 00:53:03 -0700147 spinlock_t *root_lock;
David S. Millere8a04642008-07-17 00:34:19 -0700148
149 txq = netdev_get_tx_queue(master->dev, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150 master->slaves = NULL;
David S. Miller838740002008-07-17 00:53:03 -0700151
John Fastabend46e5da40a2014-09-12 20:04:52 -0700152 root_lock = qdisc_root_sleeping_lock(rtnl_dereference(txq->qdisc));
David S. Miller838740002008-07-17 00:53:03 -0700153 spin_lock_bh(root_lock);
John Fastabend46e5da40a2014-09-12 20:04:52 -0700154 qdisc_reset(rtnl_dereference(txq->qdisc));
David S. Miller838740002008-07-17 00:53:03 -0700155 spin_unlock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 }
157 }
158 skb_queue_purge(&dat->q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 break;
160 }
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 } while ((prev = q) != master->slaves);
163 }
164}
165
/* ->init: attach @sch's device as a new slave of the teql master whose
 * Qdisc_ops were used to create it.  Rejects devices whose link-layer
 * header is larger than the master's, and the master device itself.
 * While the master is down, its flags/MTU are narrowed to what the new
 * slave supports; while it is up, an incompatible slave is refused.
 */
static int teql_qdisc_init(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	/* The master embeds the Qdisc_ops, so recover it from sch->ops. */
	struct teql_master *m = (struct teql_master *)sch->ops;
	struct teql_sched_data *q = qdisc_priv(sch);

	if (dev->hard_header_len > m->dev->hard_header_len)
		return -EINVAL;

	/* Refuse to enslave the master to itself. */
	if (m->dev == dev)
		return -ELOOP;

	q->m = m;

	skb_queue_head_init(&q->q);

	if (m->slaves) {
		if (m->dev->flags & IFF_UP) {
			/* Master is running: the new slave must support at
			 * least the master's current flags and MTU.
			 */
			if ((m->dev->flags & IFF_POINTOPOINT &&
			     !(dev->flags & IFF_POINTOPOINT)) ||
			    (m->dev->flags & IFF_BROADCAST &&
			     !(dev->flags & IFF_BROADCAST)) ||
			    (m->dev->flags & IFF_MULTICAST &&
			     !(dev->flags & IFF_MULTICAST)) ||
			    dev->mtu < m->dev->mtu)
				return -EINVAL;
		} else {
			/* Master is down: narrow its capabilities to the
			 * intersection with the new slave.
			 */
			if (!(dev->flags&IFF_POINTOPOINT))
				m->dev->flags &= ~IFF_POINTOPOINT;
			if (!(dev->flags&IFF_BROADCAST))
				m->dev->flags &= ~IFF_BROADCAST;
			if (!(dev->flags&IFF_MULTICAST))
				m->dev->flags &= ~IFF_MULTICAST;
			if (dev->mtu < m->dev->mtu)
				m->dev->mtu = dev->mtu;
		}
		/* Insert after the list head of the circular slave list. */
		q->next = NEXT_SLAVE(m->slaves);
		NEXT_SLAVE(m->slaves) = sch;
	} else {
		/* First slave: a one-element circular list; the master
		 * inherits its MTU and FMASK flags.
		 */
		q->next = sch;
		m->slaves = sch;
		m->dev->mtu = dev->mtu;
		m->dev->flags = (m->dev->flags&~FMASK)|(dev->flags&FMASK);
	}
	return 0;
}
213
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
215static int
Eric Dumazetf7e57042011-11-30 04:08:58 +0000216__teql_resolve(struct sk_buff *skb, struct sk_buff *skb_res,
217 struct net_device *dev, struct netdev_queue *txq,
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700218 struct dst_entry *dst)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219{
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700220 struct neighbour *n;
221 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700222
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700223 n = dst_neigh_lookup_skb(dst, skb);
224 if (!n)
225 return -ENOENT;
226
227 if (dst->dev != dev) {
228 struct neighbour *mn;
229
230 mn = __neigh_lookup_errno(n->tbl, n->primary_key, dev);
231 neigh_release(n);
232 if (IS_ERR(mn))
233 return PTR_ERR(mn);
234 n = mn;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700235 }
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 if (neigh_event_send(n, skb_res) == 0) {
238 int err;
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +0000239 char haddr[MAX_ADDR_LEN];
Stephen Hemminger0c4e8582007-10-09 01:36:32 -0700240
Eric Dumazet0ed8ddf2010-10-07 10:44:07 +0000241 neigh_ha_snapshot(haddr, n, dev);
Toke Høiland-Jørgensend7bf2eb2020-07-03 22:26:43 +0200242 err = dev_hard_header(skb, dev, ntohs(skb_protocol(skb, false)),
Jiri Pirkod8b96052015-01-13 17:13:43 +0100243 haddr, NULL, skb->len);
Stephen Hemminger0c4e8582007-10-09 01:36:32 -0700244
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700245 if (err < 0)
246 err = -EINVAL;
247 } else {
248 err = (skb_res == NULL) ? -EAGAIN : 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249 }
250 neigh_release(n);
David S. Millerdbedbe6d2012-07-02 21:57:45 -0700251 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700252}
253
/* Wrapper around __teql_resolve(): bail out if the slave's tx queue has
 * no real qdisc, skip resolution entirely for devices without link-layer
 * headers or packets without a dst, and hold the RCU read lock across
 * the neighbour lookup.  Return values mirror __teql_resolve(); 0 means
 * "ready to transmit on @dev".
 */
static inline int teql_resolve(struct sk_buff *skb,
			       struct sk_buff *skb_res,
			       struct net_device *dev,
			       struct netdev_queue *txq)
{
	struct dst_entry *dst = skb_dst(skb);
	int res;

	if (rcu_access_pointer(txq->qdisc) == &noop_qdisc)
		return -ENODEV;

	/* No header to build or no route info: nothing to resolve. */
	if (!dev->header_ops || !dst)
		return 0;

	rcu_read_lock();
	res = __teql_resolve(skb, skb_res, dev, txq, dst);
	rcu_read_unlock();

	return res;
}
274
/* ->ndo_start_xmit for the master device: round-robin over the slave
 * qdiscs starting at master->slaves, trying to resolve and transmit the
 * packet directly on each slave's tx queue.  If some slave only failed
 * on neighbour resolution, one second pass is made with skb_res set so
 * the packet can be handed to the resolver.  If any usable slave was
 * merely busy, the master queue is stopped and NETDEV_TX_BUSY returned;
 * otherwise the packet is dropped.
 */
static netdev_tx_t teql_master_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc *start, *q;
	int busy;
	int nores;
	int subq = skb_get_queue_mapping(skb);
	struct sk_buff *skb_res = NULL;

	start = master->slaves;

restart:
	nores = 0;
	busy = 0;

	q = start;
	if (!q)
		goto drop;

	do {
		struct net_device *slave = qdisc_dev(q);
		struct netdev_queue *slave_txq = netdev_get_tx_queue(slave, 0);

		/* Skip slaves whose device no longer runs this qdisc. */
		if (slave_txq->qdisc_sleeping != q)
			continue;
		if (netif_xmit_stopped(netdev_get_tx_queue(slave, subq)) ||
		    !netif_running(slave)) {
			busy = 1;
			continue;
		}

		switch (teql_resolve(skb, skb_res, slave, slave_txq)) {
		case 0:
			/* Header resolved: try to transmit on the slave's
			 * queue without blocking on its tx lock.
			 */
			if (__netif_tx_trylock(slave_txq)) {
				unsigned int length = qdisc_pkt_len(skb);

				if (!netif_xmit_frozen_or_stopped(slave_txq) &&
				    netdev_start_xmit(skb, slave, slave_txq, false) ==
				    NETDEV_TX_OK) {
					__netif_tx_unlock(slave_txq);
					/* Advance round-robin past this slave. */
					master->slaves = NEXT_SLAVE(q);
					netif_wake_queue(dev);
					master->tx_packets++;
					master->tx_bytes += length;
					return NETDEV_TX_OK;
				}
				__netif_tx_unlock(slave_txq);
			}
			if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)))
				busy = 1;
			break;
		case 1:
			/* skb was taken over by neighbour resolution. */
			master->slaves = NEXT_SLAVE(q);
			return NETDEV_TX_OK;
		default:
			nores = 1;
			break;
		}
		/* Strip any stale link-layer header before the next slave. */
		__skb_pull(skb, skb_network_offset(skb));
	} while ((q = NEXT_SLAVE(q)) != start);

	if (nores && skb_res == NULL) {
		/* Retry once, this time offering skb for resolution. */
		skb_res = skb;
		goto restart;
	}

	if (busy) {
		netif_stop_queue(dev);
		return NETDEV_TX_BUSY;
	}
	master->tx_errors++;

drop:
	master->tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
352
353static int teql_master_open(struct net_device *dev)
354{
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000355 struct Qdisc *q;
Patrick McHardy2941a482006-01-08 22:05:26 -0800356 struct teql_master *m = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 int mtu = 0xFFFE;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000358 unsigned int flags = IFF_NOARP | IFF_MULTICAST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
360 if (m->slaves == NULL)
361 return -EUNATCH;
362
363 flags = FMASK;
364
365 q = m->slaves;
366 do {
David S. Miller5ce2d482008-07-08 17:06:30 -0700367 struct net_device *slave = qdisc_dev(q);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368
369 if (slave == NULL)
370 return -EUNATCH;
371
372 if (slave->mtu < mtu)
373 mtu = slave->mtu;
374 if (slave->hard_header_len > LL_MAX_HEADER)
375 return -EINVAL;
376
377 /* If all the slaves are BROADCAST, master is BROADCAST
378 If all the slaves are PtP, master is PtP
379 Otherwise, master is NBMA.
380 */
381 if (!(slave->flags&IFF_POINTOPOINT))
382 flags &= ~IFF_POINTOPOINT;
383 if (!(slave->flags&IFF_BROADCAST))
384 flags &= ~IFF_BROADCAST;
385 if (!(slave->flags&IFF_MULTICAST))
386 flags &= ~IFF_MULTICAST;
387 } while ((q = NEXT_SLAVE(q)) != m->slaves);
388
389 m->dev->mtu = mtu;
390 m->dev->flags = (m->dev->flags&~FMASK) | flags;
391 netif_start_queue(m->dev);
392 return 0;
393}
394
/* ->ndo_stop for the master: just stop the tx queue. */
static int teql_master_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
400
/* ->ndo_get_stats64: report the software tx counters kept in
 * struct teql_master by teql_master_xmit().
 */
static void teql_master_stats64(struct net_device *dev,
				struct rtnl_link_stats64 *stats)
{
	struct teql_master *m = netdev_priv(dev);

	stats->tx_packets = m->tx_packets;
	stats->tx_bytes = m->tx_bytes;
	stats->tx_errors = m->tx_errors;
	stats->tx_dropped = m->tx_dropped;
}
411
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412static int teql_master_mtu(struct net_device *dev, int new_mtu)
413{
Patrick McHardy2941a482006-01-08 22:05:26 -0800414 struct teql_master *m = netdev_priv(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415 struct Qdisc *q;
416
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417 q = m->slaves;
418 if (q) {
419 do {
David S. Miller5ce2d482008-07-08 17:06:30 -0700420 if (new_mtu > qdisc_dev(q)->mtu)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 return -EINVAL;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000422 } while ((q = NEXT_SLAVE(q)) != m->slaves);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 }
424
425 dev->mtu = new_mtu;
426 return 0;
427}
428
/* Callbacks for the teql master net_device; installed by
 * teql_master_setup().
 */
static const struct net_device_ops teql_netdev_ops = {
	.ndo_open	= teql_master_open,
	.ndo_stop	= teql_master_close,
	.ndo_start_xmit	= teql_master_xmit,
	.ndo_get_stats64 = teql_master_stats64,
	.ndo_change_mtu	= teql_master_mtu,
};
436
/* alloc_netdev() setup callback: wire the per-master Qdisc_ops to the
 * teql qdisc routines and initialize the virtual device's defaults
 * (no ARP, void hardware type, keep dst entries on queued skbs).
 */
static __init void teql_master_setup(struct net_device *dev)
{
	struct teql_master *master = netdev_priv(dev);
	struct Qdisc_ops *ops = &master->qops;

	master->dev = dev;
	ops->priv_size = sizeof(struct teql_sched_data);

	ops->enqueue = teql_enqueue;
	ops->dequeue = teql_dequeue;
	ops->peek = teql_peek;
	ops->init = teql_qdisc_init;
	ops->reset = teql_reset;
	ops->destroy = teql_destroy;
	ops->owner = THIS_MODULE;

	dev->netdev_ops = &teql_netdev_ops;
	dev->type = ARPHRD_VOID;
	dev->mtu = 1500;
	dev->min_mtu = 68;
	dev->max_mtu = 65535;
	dev->tx_queue_len = 100;
	dev->flags = IFF_NOARP;
	dev->hard_header_len = LL_MAX_HEADER;
	/* Keep skb dst entries so teql_resolve() can use them. */
	netif_keep_dst(dev);
}
463
/* All registered teql masters, for teardown in teql_exit(). */
static LIST_HEAD(master_dev_list);
/* Number of teqlN devices (and matching qdiscs) to create at load time. */
static int max_equalizers = 1;
module_param(max_equalizers, int, 0);
MODULE_PARM_DESC(max_equalizers, "Max number of link equalizers");
468
/* Module init: create max_equalizers master devices "teql0..teqlN-1",
 * registering for each a net_device and a qdisc named after it.  On a
 * partial failure, already-created masters are kept: the module loads
 * successfully if at least one master was set up (return i ? 0 : err).
 */
static int __init teql_init(void)
{
	int i;
	int err = -ENODEV;

	for (i = 0; i < max_equalizers; i++) {
		struct net_device *dev;
		struct teql_master *master;

		dev = alloc_netdev(sizeof(struct teql_master), "teql%d",
				   NET_NAME_UNKNOWN, teql_master_setup);
		if (!dev) {
			err = -ENOMEM;
			break;
		}

		if ((err = register_netdev(dev))) {
			free_netdev(dev);
			break;
		}

		master = netdev_priv(dev);

		/* The qdisc is looked up by this id; it must match the
		 * device name so "tc qdisc ... teqlN" finds it.
		 */
		strlcpy(master->qops.id, dev->name, IFNAMSIZ);
		err = register_qdisc(&master->qops);

		if (err) {
			unregister_netdev(dev);
			free_netdev(dev);
			break;
		}

		list_add_tail(&master->master_list, &master_dev_list);
	}
	return i ? 0 : err;
}
505
/* Module exit: tear down every master created by teql_init(), undoing
 * registrations in reverse order (qdisc, then netdev).
 */
static void __exit teql_exit(void)
{
	struct teql_master *master, *nxt;

	list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {

		list_del(&master->master_list);

		unregister_qdisc(&master->qops);
		unregister_netdev(master->dev);
		free_netdev(master->dev);
	}
}
519
/* Standard module plumbing. */
module_init(teql_init);
module_exit(teql_exit);

MODULE_LICENSE("GPL");