/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Main qdisc structure lock.

   Modifications to data participating in scheduling must be additionally
   protected with dev->queue_lock spinlock.

   The idea is the following:
   - enqueue, dequeue are serialized via top level device
     spinlock dev->queue_lock.
   - tree walking is protected by read_lock_bh(qdisc_tree_lock)
     and this lock is used only in process context.
   - updates to tree are made under rtnl semaphore or
     from softirq context (__qdisc_destroy rcu-callback)
     hence this lock needs local bh disabling.

   qdisc_tree_lock must be grabbed BEFORE dev->queue_lock!
 */
DEFINE_RWLOCK(qdisc_tree_lock);

void qdisc_lock_tree(struct net_device *dev)
{
	write_lock_bh(&qdisc_tree_lock);
	spin_lock_bh(&dev->queue_lock);
}

void qdisc_unlock_tree(struct net_device *dev)
{
	spin_unlock_bh(&dev->queue_lock);
	write_unlock_bh(&qdisc_tree_lock);
}
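
/* A minimal usage sketch of the ordering rule above (illustrative,
 * mirroring what dev_init_scheduler() below actually does): tree updates
 * take both locks via the helpers, never in the reverse order:
 *
 *	qdisc_lock_tree(dev);	   -- qdisc_tree_lock, then dev->queue_lock
 *	dev->qdisc_sleeping = qdisc;
 *	qdisc_unlock_tree(dev);	   -- released in the opposite order
 */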

/*
   dev->queue_lock serializes queue accesses for this device
   AND dev->qdisc pointer itself.

   netif_tx_lock serializes accesses to the device driver.

   dev->queue_lock and netif_tx_lock are mutually exclusive:
   if one is grabbed, the other must be free.
 */


/* Kick device.
   Note that this procedure can be called by a watchdog timer, so that
   we do not check dev->tbusy flag here.

   Returns:  0 - queue is empty.
	    >0 - queue is not empty, but throttled.
	    <0 - queue is not empty. Device is throttled, if dev->tbusy != 0.

   NOTE: Called under dev->queue_lock with locally disabled BH.
*/

static inline int qdisc_restart(struct net_device *dev)
{
	struct Qdisc *q = dev->qdisc;
	struct sk_buff *skb;

	/* Dequeue packet */
	if ((skb = q->dequeue(q)) != NULL) {
		unsigned nolock = (dev->features & NETIF_F_LLTX);
		/*
		 * When the driver has LLTX set it does its own locking
		 * in start_xmit. No need to add additional overhead by
		 * locking again. These checks are worth it because
		 * even uncongested locks can be quite expensive.
		 * The driver can do trylock like here too, in case
		 * of lock congestion it should return NETDEV_TX_LOCKED
		 * and the packet will be requeued.
		 */
		if (!nolock) {
			if (!netif_tx_trylock(dev)) {
			collision:
				/* So, someone grabbed the driver. */

				/* It may be a transient configuration error,
				   when hard_start_xmit() recurses. We detect
				   it by checking xmit owner and drop the
				   packet when a deadloop is detected.
				 */
				if (dev->xmit_lock_owner == smp_processor_id()) {
					kfree_skb(skb);
					if (net_ratelimit())
						printk(KERN_DEBUG "Dead loop on netdevice %s, fix it urgently!\n", dev->name);
					return -1;
				}
				__get_cpu_var(netdev_rx_stat).cpu_collision++;
				goto requeue;
			}
		}

		{
			/* And release queue */
			spin_unlock(&dev->queue_lock);

			if (!netif_queue_stopped(dev)) {
				int ret;
				if (netdev_nit)
					dev_queue_xmit_nit(skb, dev);

				ret = dev->hard_start_xmit(skb, dev);
				if (ret == NETDEV_TX_OK) {
					if (!nolock) {
						netif_tx_unlock(dev);
					}
					spin_lock(&dev->queue_lock);
					return -1;
				}
				if (ret == NETDEV_TX_LOCKED && nolock) {
					spin_lock(&dev->queue_lock);
					goto collision;
				}
			}

			/* NETDEV_TX_BUSY - we need to requeue */
			/* Release the driver */
			if (!nolock) {
				netif_tx_unlock(dev);
			}
			spin_lock(&dev->queue_lock);
			q = dev->qdisc;
		}

		/* Device kicked us out :(
		   This is possible in the following cases:

		   0. driver is locked
		   1. fastroute is enabled
		   2. device cannot determine busy state
		      before start of transmission (f.e. dialout)
		   3. device is buggy (ppp)
		 */

requeue:
		q->ops->requeue(skb, q);
		netif_schedule(dev);
		return 1;
	}
	BUG_ON((int) q->q.qlen < 0);
	return q->q.qlen;
}

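/* __qdisc_run() below loops as long as qdisc_restart() returns a negative
 * value (queue not yet empty, a packet was just handled) and the driver
 * has not stopped the queue. The __LINK_STATE_QDISC_RUNNING bit is assumed
 * to have been set by the caller (qdisc_run()); clearing it on the way out
 * lets another CPU take over transmission.
 */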
void __qdisc_run(struct net_device *dev)
{
	if (unlikely(dev->qdisc == &noop_qdisc))
		goto out;

	while (qdisc_restart(dev) < 0 && !netif_queue_stopped(dev))
		/* NOTHING */;

out:
	clear_bit(__LINK_STATE_QDISC_RUNNING, &dev->state);
}

static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (dev->qdisc != &noop_qdisc) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			if (netif_queue_stopped(dev) &&
			    time_after(jiffies, dev->trans_start + dev->watchdog_timeo)) {

				printk(KERN_INFO "NETDEV WATCHDOG: %s: transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
			}
			if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

static void dev_watchdog_init(struct net_device *dev)
{
	init_timer(&dev->watchdog_timer);
	dev->watchdog_timer.data = (unsigned long)dev;
	dev->watchdog_timer.function = dev_watchdog;
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer, jiffies + dev->watchdog_timeo))
			dev_hold(dev);
	}
}
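
/* Refcounting note: mod_timer() returns 0 when the timer was not already
 * pending, i.e. this call armed it, so a device reference is taken; the
 * matching dev_put() happens in dev_watchdog() or dev_watchdog_down().
 */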

static void dev_watchdog_up(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	__netdev_watchdog_up(dev);
	netif_tx_unlock_bh(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
	if (netif_running(dev))
		__netdev_watchdog_up(dev);
}

void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}

/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops = {
	.id		= "noop",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.requeue	= noop_requeue,
	.owner		= THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noop_qdisc_ops,
	.list		= LIST_HEAD_INIT(noop_qdisc.list),
};

static struct Qdisc_ops noqueue_qdisc_ops = {
	.id		= "noqueue",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.requeue	= noop_requeue,
	.owner		= THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	= NULL,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noqueue_qdisc_ops,
	.list		= LIST_HEAD_INIT(noqueue_qdisc.list),
};


static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };

/* 3-band FIFO queue: old style, but should be a bit faster than
   generic prio+fifo combination.
 */
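
/* Worked example of the prio2band mapping above: dequeue scans band 0
 * first, so a packet with skb->priority == TC_PRIO_CONTROL (7) lands in
 * band 0 (highest), TC_PRIO_BESTEFFORT (0) in band 1, and
 * TC_PRIO_BULK (2) in band 2 (lowest).
 */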

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc->dev->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct rtattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops = {
	.id		= "pfifo_fast",
	.priv_size	= PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	= pfifo_fast_enqueue,
	.dequeue	= pfifo_fast_dequeue,
	.requeue	= pfifo_fast_requeue,
	.init		= pfifo_fast_init,
	.reset		= pfifo_fast_reset,
	.dump		= pfifo_fast_dump,
	.owner		= THIS_MODULE,
};
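
/* pfifo_fast is the default qdisc: dev_activate() below attaches it to
 * any device with a nonzero tx_queue_len, and noqueue_qdisc otherwise.
 */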

struct Qdisc *qdisc_alloc(struct net_device *dev, struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kmalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	memset(p, 0, size);
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev = dev;
	dev_hold(dev);
	sch->stats_lock = &dev->queue_lock;
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);	/* err is already negative (-ENOBUFS) */
}
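
/* Worked example of the alignment above, assuming QDISC_ALIGNTO is 32:
 * if kmalloc() returns p ending in ...0x08, QDISC_ALIGN() rounds sch up
 * to ...0x20 and sch->padded records the 0x18-byte offset, so that
 * __qdisc_destroy() can recover the original pointer with
 * kfree((char *) qdisc - qdisc->padded).
 */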

struct Qdisc *qdisc_create_dflt(struct net_device *dev, struct Qdisc_ops *ops)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev, ops);
	if (IS_ERR(sch))
		goto errout;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
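
/* Note the differing failure conventions: qdisc_alloc() reports failure
 * via ERR_PTR(), while qdisc_create_dflt() returns NULL.
 */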

/* Under dev->queue_lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	struct Qdisc_ops *ops = qdisc->ops;

#ifdef CONFIG_NET_ESTIMATOR
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
#endif
	write_lock(&qdisc_tree_lock);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);
	write_unlock(&qdisc_tree_lock);
	module_put(ops->owner);

	dev_put(qdisc->dev);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under dev->queue_lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	struct list_head cql = LIST_HEAD_INIT(cql);
	struct Qdisc *cq, *q, *n;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	if (!list_empty(&qdisc->list)) {
		if (qdisc->ops->cl_ops == NULL)
			list_del(&qdisc->list);
		else
			list_move(&qdisc->list, &cql);
	}

	/* unlink inner qdiscs from dev->qdisc_list immediately */
	list_for_each_entry(cq, &cql, list)
		list_for_each_entry_safe(q, n, &qdisc->dev->qdisc_list, list)
			if (TC_H_MAJ(q->parent) == TC_H_MAJ(cq->handle)) {
				if (q->ops->cl_ops == NULL)
					list_del_init(&q->list);
				else
					list_move_tail(&q->list, &cql);
			}
	list_for_each_entry_safe(cq, n, &cql, list)
		list_del_init(&cq->list);

	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}

void dev_activate(struct net_device *dev)
{
	/* If no queueing discipline is attached to the device, create a
	   default one: pfifo_fast for devices that need queueing, and
	   noqueue_qdisc for virtual interfaces.
	 */

	if (dev->qdisc_sleeping == &noop_qdisc) {
		struct Qdisc *qdisc;
		if (dev->tx_queue_len) {
			qdisc = qdisc_create_dflt(dev, &pfifo_fast_ops);
			if (qdisc == NULL) {
				printk(KERN_INFO "%s: activation failed\n", dev->name);
				return;
			}
			write_lock_bh(&qdisc_tree_lock);
			list_add_tail(&qdisc->list, &dev->qdisc_list);
			write_unlock_bh(&qdisc_tree_lock);
		} else {
			qdisc = &noqueue_qdisc;
		}
		write_lock_bh(&qdisc_tree_lock);
		dev->qdisc_sleeping = qdisc;
		write_unlock_bh(&qdisc_tree_lock);
	}

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	spin_lock_bh(&dev->queue_lock);
	rcu_assign_pointer(dev->qdisc, dev->qdisc_sleeping);
	if (dev->qdisc != &noqueue_qdisc) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}

void dev_deactivate(struct net_device *dev)
{
	struct Qdisc *qdisc;

	spin_lock_bh(&dev->queue_lock);
	qdisc = dev->qdisc;
	dev->qdisc = &noop_qdisc;

	qdisc_reset(qdisc);

	spin_unlock_bh(&dev->queue_lock);

	dev_watchdog_down(dev);

	/* Wait for outstanding dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	while (test_bit(__LINK_STATE_QDISC_RUNNING, &dev->state))
		yield();
}
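
/* The yield() loop above pairs with __LINK_STATE_QDISC_RUNNING in
 * __qdisc_run(): once the bit is clear, no CPU is inside qdisc_restart()
 * for this device, so the qdisc can be torn down safely.
 */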

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	INIT_LIST_HEAD(&dev->qdisc_list);
	qdisc_unlock_tree(dev);

	dev_watchdog_init(dev);
}

void dev_shutdown(struct net_device *dev)
{
	struct Qdisc *qdisc;

	qdisc_lock_tree(dev);
	qdisc = dev->qdisc_sleeping;
	dev->qdisc = &noop_qdisc;
	dev->qdisc_sleeping = &noop_qdisc;
	qdisc_destroy(qdisc);
#if defined(CONFIG_NET_SCH_INGRESS) || defined(CONFIG_NET_SCH_INGRESS_MODULE)
	if ((qdisc = dev->qdisc_ingress) != NULL) {
		dev->qdisc_ingress = NULL;
		qdisc_destroy(qdisc);
	}
#endif
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}

EXPORT_SYMBOL(__netdev_watchdog_up);
EXPORT_SYMBOL(netif_carrier_on);
EXPORT_SYMBOL(netif_carrier_off);
EXPORT_SYMBOL(noop_qdisc);
EXPORT_SYMBOL(noop_qdisc_ops);
EXPORT_SYMBOL(qdisc_create_dflt);
EXPORT_SYMBOL(qdisc_alloc);
EXPORT_SYMBOL(qdisc_destroy);
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);