/*
 * net/sched/sch_generic.c	Generic packet scheduler routines.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *		Jamal Hadi Salim, <hadi@cyberus.ca> 990601
 *		- Ingress support
 */

#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/init.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
#include <net/pkt_sched.h>

/* Main transmission queue. */

/* Modifications to data participating in scheduling must be protected with
 * queue->lock spinlock.
 *
 * The idea is the following:
 * - enqueue, dequeue are serialized via top level device
 *   spinlock queue->lock.
 * - ingress filtering is serialized via top level device
 *   spinlock dev->rx_queue.lock.
 * - updates to tree and tree walking are only done under the rtnl mutex.
 */

void qdisc_lock_tree(struct net_device *dev)
	__acquires(dev->rx_queue.lock)
{
	unsigned int i;

	local_bh_disable();
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		spin_lock(&txq->lock);
	}
	spin_lock(&dev->rx_queue.lock);
}
EXPORT_SYMBOL(qdisc_lock_tree);

void qdisc_unlock_tree(struct net_device *dev)
	__releases(dev->rx_queue.lock)
{
	unsigned int i;

	spin_unlock(&dev->rx_queue.lock);
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		spin_unlock(&txq->lock);
	}
	local_bh_enable();
}
EXPORT_SYMBOL(qdisc_unlock_tree);

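/*
 * A sketch of the intended usage (illustrative only): code that changes
 * the qdisc tree brackets the modification with the helpers above, e.g.
 *
 *	qdisc_lock_tree(dev);
 *	... swap or rebuild txq->qdisc pointers ...
 *	qdisc_unlock_tree(dev);
 *
 * as dev_init_scheduler() and dev_shutdown() do below.
 */
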
static inline int qdisc_qlen(struct Qdisc *q)
{
	return q->q.qlen;
}

static inline int dev_requeue_skb(struct sk_buff *skb,
				  struct netdev_queue *dev_queue,
				  struct Qdisc *q)
{
	if (unlikely(skb->next))
		q->gso_skb = skb;
	else
		q->ops->requeue(skb, q);

	netif_schedule_queue(dev_queue);
	return 0;
}

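/*
 * Grab the next packet to transmit: a GSO segment list stashed by
 * dev_requeue_skb() (q->gso_skb, recognizable by skb->next being set)
 * takes priority over a fresh dequeue from the qdisc, so a partially
 * sent list is resumed first.
 */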
static inline struct sk_buff *dequeue_skb(struct Qdisc *q)
{
	struct sk_buff *skb;

	if ((skb = q->gso_skb))
		q->gso_skb = NULL;
	else
		skb = q->dequeue(q);

	return skb;
}

static inline int handle_dev_cpu_collision(struct sk_buff *skb,
					   struct netdev_queue *dev_queue,
					   struct Qdisc *q)
{
	int ret;

	if (unlikely(dev_queue->xmit_lock_owner == smp_processor_id())) {
		/*
		 * The same CPU is already holding the lock, which means
		 * hard_start_xmit() has recursed - usually a transient
		 * configuration error. We detect the dead loop by checking
		 * the xmit owner and drop the packet. Return OK to try the
		 * next skb.
		 */
		kfree_skb(skb);
		if (net_ratelimit())
			printk(KERN_WARNING "Dead loop on netdevice %s, "
			       "fix it urgently!\n", dev_queue->dev->name);
		ret = qdisc_qlen(q);
	} else {
		/*
		 * Another CPU is holding the lock; requeue the skb and
		 * delay xmits for some time.
		 */
		__get_cpu_var(netdev_rx_stat).cpu_collision++;
		ret = dev_requeue_skb(skb, dev_queue, q);
	}

	return ret;
}

/*
 * NOTE: Called under queue->lock with locally disabled BH.
 *
 * __QUEUE_STATE_QDISC_RUNNING guarantees that only one CPU can process
 * this queue at a time. queue->lock serializes queue accesses for
 * this queue AND the txq->qdisc pointer itself.
 *
 * netif_tx_lock serializes accesses to the device driver.
 *
 * queue->lock and netif_tx_lock are mutually exclusive:
 * if one is grabbed, the other must be free.
 *
 * Note that this procedure can also be called by a watchdog timer.
 *
 * Returns to the caller:
 *	0  - queue is empty or throttled.
 *	>0 - queue is not empty.
 */
static inline int qdisc_restart(struct netdev_queue *txq)
{
	struct Qdisc *q = txq->qdisc;
	int ret = NETDEV_TX_BUSY;
	struct net_device *dev;
	struct sk_buff *skb;

	/* Dequeue packet */
	if (unlikely((skb = dequeue_skb(q)) == NULL))
		return 0;

	/* And release queue */
	spin_unlock(&txq->lock);

	dev = txq->dev;

	HARD_TX_LOCK(dev, txq, smp_processor_id());
	if (!netif_subqueue_stopped(dev, skb))
		ret = dev_hard_start_xmit(skb, dev, txq);
	HARD_TX_UNLOCK(dev, txq);

	spin_lock(&txq->lock);
	q = txq->qdisc;

	switch (ret) {
	case NETDEV_TX_OK:
		/* Driver sent out skb successfully */
		ret = qdisc_qlen(q);
		break;

	case NETDEV_TX_LOCKED:
		/* Driver trylock failed */
		ret = handle_dev_cpu_collision(skb, txq, q);
		break;

	default:
		/* Driver returned NETDEV_TX_BUSY - requeue skb */
		if (unlikely(ret != NETDEV_TX_BUSY && net_ratelimit()))
			printk(KERN_WARNING "BUG %s code %d qlen %d\n",
			       dev->name, ret, q->q.qlen);

		ret = dev_requeue_skb(skb, txq, q);
		break;
	}

	return ret;
}

void __qdisc_run(struct netdev_queue *txq)
{
	unsigned long start_time = jiffies;

	while (qdisc_restart(txq)) {
		if (netif_tx_queue_stopped(txq))
			break;

		/*
		 * Postpone processing if
		 * 1. another process needs the CPU;
		 * 2. we've been doing it for too long.
		 */
		if (need_resched() || jiffies != start_time) {
			netif_schedule_queue(txq);
			break;
		}
	}

	clear_bit(__QUEUE_STATE_QDISC_RUNNING, &txq->state);
}

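/*
 * For context, the caller is expected to take the RUNNING bit before
 * entering the loop above; a sketch of qdisc_run() (assuming the
 * include/net/pkt_sched.h of this snapshot) looks roughly like:
 *
 *	static inline void qdisc_run(struct netdev_queue *txq)
 *	{
 *		if (!test_and_set_bit(__QUEUE_STATE_QDISC_RUNNING,
 *				      &txq->state))
 *			__qdisc_run(txq);
 *	}
 *
 * which is why __qdisc_run() must clear the bit on its way out.
 */
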
static void dev_watchdog(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;

	netif_tx_lock(dev);
	if (!qdisc_tx_is_noop(dev)) {
		if (netif_device_present(dev) &&
		    netif_running(dev) &&
		    netif_carrier_ok(dev)) {
			int some_queue_stopped = 0;
			unsigned int i;

			for (i = 0; i < dev->num_tx_queues; i++) {
				struct netdev_queue *txq;

				txq = netdev_get_tx_queue(dev, i);
				if (netif_tx_queue_stopped(txq)) {
					some_queue_stopped = 1;
					break;
				}
			}

			if (some_queue_stopped &&
			    time_after(jiffies, (dev->trans_start +
						 dev->watchdog_timeo))) {
				printk(KERN_INFO "NETDEV WATCHDOG: %s: "
				       "transmit timed out\n",
				       dev->name);
				dev->tx_timeout(dev);
				WARN_ON_ONCE(1);
			}
			if (!mod_timer(&dev->watchdog_timer,
				       round_jiffies(jiffies +
						     dev->watchdog_timeo)))
				dev_hold(dev);
		}
	}
	netif_tx_unlock(dev);

	dev_put(dev);
}

void __netdev_watchdog_up(struct net_device *dev)
{
	if (dev->tx_timeout) {
		if (dev->watchdog_timeo <= 0)
			dev->watchdog_timeo = 5*HZ;
		if (!mod_timer(&dev->watchdog_timer,
			       round_jiffies(jiffies + dev->watchdog_timeo)))
			dev_hold(dev);
	}
}

static void dev_watchdog_up(struct net_device *dev)
{
	__netdev_watchdog_up(dev);
}

static void dev_watchdog_down(struct net_device *dev)
{
	netif_tx_lock_bh(dev);
	if (del_timer(&dev->watchdog_timer))
		dev_put(dev);
	netif_tx_unlock_bh(dev);
}

/**
 *	netif_carrier_on - set carrier
 *	@dev: network device
 *
 *	Device has detected acquisition of carrier.
 */
void netif_carrier_on(struct net_device *dev)
{
	if (test_and_clear_bit(__LINK_STATE_NOCARRIER, &dev->state)) {
		linkwatch_fire_event(dev);
		if (netif_running(dev))
			__netdev_watchdog_up(dev);
	}
}
EXPORT_SYMBOL(netif_carrier_on);

/**
 *	netif_carrier_off - clear carrier
 *	@dev: network device
 *
 *	Device has detected loss of carrier.
 */
void netif_carrier_off(struct net_device *dev)
{
	if (!test_and_set_bit(__LINK_STATE_NOCARRIER, &dev->state))
		linkwatch_fire_event(dev);
}
EXPORT_SYMBOL(netif_carrier_off);

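/*
 * Typical (hypothetical) driver usage: a link-state interrupt handler
 * reports transitions with these helpers, e.g.
 *
 *	if (link_up)
 *		netif_carrier_on(dev);
 *	else
 *		netif_carrier_off(dev);
 *
 * so the linkwatch event and watchdog bookkeeping stay here instead of
 * being duplicated in every driver.
 */
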
/* "NOOP" scheduler: the best scheduler, recommended for all interfaces
   under all circumstances. It is difficult to invent anything faster or
   cheaper.
 */

static int noop_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	kfree_skb(skb);
	return NET_XMIT_CN;
}

static struct sk_buff *noop_dequeue(struct Qdisc *qdisc)
{
	return NULL;
}

static int noop_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	if (net_ratelimit())
		printk(KERN_DEBUG "%s deferred output. It is buggy.\n",
		       skb->dev->name);
	kfree_skb(skb);
	return NET_XMIT_CN;
}

struct Qdisc_ops noop_qdisc_ops __read_mostly = {
	.id		= "noop",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.requeue	= noop_requeue,
	.owner		= THIS_MODULE,
};

struct Qdisc noop_qdisc = {
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noop_qdisc_ops,
	.list		= LIST_HEAD_INIT(noop_qdisc.list),
};
EXPORT_SYMBOL(noop_qdisc);

static struct Qdisc_ops noqueue_qdisc_ops __read_mostly = {
	.id		= "noqueue",
	.priv_size	= 0,
	.enqueue	= noop_enqueue,
	.dequeue	= noop_dequeue,
	.requeue	= noop_requeue,
	.owner		= THIS_MODULE,
};

static struct Qdisc noqueue_qdisc = {
	.enqueue	= NULL,
	.dequeue	= noop_dequeue,
	.flags		= TCQ_F_BUILTIN,
	.ops		= &noqueue_qdisc_ops,
	.list		= LIST_HEAD_INIT(noqueue_qdisc.list),
};


static const u8 prio2band[TC_PRIO_MAX+1] =
	{ 1, 2, 2, 2, 1, 2, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1 };
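
/* Worked example: the band is prio2band[skb->priority & TC_PRIO_MAX],
 * so TC_PRIO_CONTROL (7) and TC_PRIO_INTERACTIVE (6) map to band 0,
 * TC_PRIO_BESTEFFORT (0) to band 1, and TC_PRIO_BULK (2) to band 2;
 * lower-numbered bands are dequeued first.
 */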

/* 3-band FIFO queue: old style, but should be a bit faster than
   the generic prio+fifo combination.
 */

#define PFIFO_FAST_BANDS 3

static inline struct sk_buff_head *prio2list(struct sk_buff *skb,
					     struct Qdisc *qdisc)
{
	struct sk_buff_head *list = qdisc_priv(qdisc);
	return list + prio2band[skb->priority & TC_PRIO_MAX];
}

static int pfifo_fast_enqueue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	struct sk_buff_head *list = prio2list(skb, qdisc);

	if (skb_queue_len(list) < qdisc_dev(qdisc)->tx_queue_len) {
		qdisc->q.qlen++;
		return __qdisc_enqueue_tail(skb, qdisc, list);
	}

	return qdisc_drop(skb, qdisc);
}

static struct sk_buff *pfifo_fast_dequeue(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++) {
		if (!skb_queue_empty(list + prio)) {
			qdisc->q.qlen--;
			return __qdisc_dequeue_head(qdisc, list + prio);
		}
	}

	return NULL;
}

static int pfifo_fast_requeue(struct sk_buff *skb, struct Qdisc *qdisc)
{
	qdisc->q.qlen++;
	return __qdisc_requeue(skb, qdisc, prio2list(skb, qdisc));
}

static void pfifo_fast_reset(struct Qdisc *qdisc)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		__qdisc_reset_queue(qdisc, list + prio);

	qdisc->qstats.backlog = 0;
	qdisc->q.qlen = 0;
}

static int pfifo_fast_dump(struct Qdisc *qdisc, struct sk_buff *skb)
{
	struct tc_prio_qopt opt = { .bands = PFIFO_FAST_BANDS };

	memcpy(&opt.priomap, prio2band, TC_PRIO_MAX+1);
	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

nla_put_failure:
	return -1;
}

static int pfifo_fast_init(struct Qdisc *qdisc, struct nlattr *opt)
{
	int prio;
	struct sk_buff_head *list = qdisc_priv(qdisc);

	for (prio = 0; prio < PFIFO_FAST_BANDS; prio++)
		skb_queue_head_init(list + prio);

	return 0;
}

static struct Qdisc_ops pfifo_fast_ops __read_mostly = {
	.id		= "pfifo_fast",
	.priv_size	= PFIFO_FAST_BANDS * sizeof(struct sk_buff_head),
	.enqueue	= pfifo_fast_enqueue,
	.dequeue	= pfifo_fast_dequeue,
	.requeue	= pfifo_fast_requeue,
	.init		= pfifo_fast_init,
	.reset		= pfifo_fast_reset,
	.dump		= pfifo_fast_dump,
	.owner		= THIS_MODULE,
};

struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
			  struct Qdisc_ops *ops)
{
	void *p;
	struct Qdisc *sch;
	unsigned int size;
	int err = -ENOBUFS;

	/* ensure that the Qdisc and the private data are 32-byte aligned */
	size = QDISC_ALIGN(sizeof(*sch));
	size += ops->priv_size + (QDISC_ALIGNTO - 1);

	p = kzalloc(size, GFP_KERNEL);
	if (!p)
		goto errout;
	sch = (struct Qdisc *) QDISC_ALIGN((unsigned long) p);
	sch->padded = (char *) sch - (char *) p;

	INIT_LIST_HEAD(&sch->list);
	skb_queue_head_init(&sch->q);
	sch->ops = ops;
	sch->enqueue = ops->enqueue;
	sch->dequeue = ops->dequeue;
	sch->dev_queue = dev_queue;
	dev_hold(qdisc_dev(sch));
	atomic_set(&sch->refcnt, 1);

	return sch;
errout:
	return ERR_PTR(err);
}

struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				struct netdev_queue *dev_queue,
				struct Qdisc_ops *ops,
				unsigned int parentid)
{
	struct Qdisc *sch;

	sch = qdisc_alloc(dev_queue, ops);
	if (IS_ERR(sch))
		goto errout;
	sch->parent = parentid;

	if (!ops->init || ops->init(sch, NULL) == 0)
		return sch;

	qdisc_destroy(sch);
errout:
	return NULL;
}
EXPORT_SYMBOL(qdisc_create_dflt);

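/*
 * A sketch of typical usage (illustrative, not taken from this file):
 * a classful qdisc creates a default child fifo per class with e.g.
 *
 *	child = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
 *				  &pfifo_qdisc_ops,
 *				  TC_H_MAKE(sch->handle, 1));
 *
 * and falls back to &noop_qdisc if this returns NULL.
 */
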
/* Under queue->lock and BH! */

void qdisc_reset(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (ops->reset)
		ops->reset(qdisc);
}
EXPORT_SYMBOL(qdisc_reset);

/* this is the rcu callback function to clean up a qdisc when there
 * are no further references to it */

static void __qdisc_destroy(struct rcu_head *head)
{
	struct Qdisc *qdisc = container_of(head, struct Qdisc, q_rcu);
	kfree((char *) qdisc - qdisc->padded);
}

/* Under queue->lock and BH! */

void qdisc_destroy(struct Qdisc *qdisc)
{
	const struct Qdisc_ops *ops = qdisc->ops;

	if (qdisc->flags & TCQ_F_BUILTIN ||
	    !atomic_dec_and_test(&qdisc->refcnt))
		return;

	list_del(&qdisc->list);
	gen_kill_estimator(&qdisc->bstats, &qdisc->rate_est);
	if (ops->reset)
		ops->reset(qdisc);
	if (ops->destroy)
		ops->destroy(qdisc);

	module_put(ops->owner);
	dev_put(qdisc_dev(qdisc));
	call_rcu(&qdisc->q_rcu, __qdisc_destroy);
}
EXPORT_SYMBOL(qdisc_destroy);

static bool dev_all_qdisc_sleeping_noop(struct net_device *dev)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);

		if (txq->qdisc_sleeping != &noop_qdisc)
			return false;
	}
	return true;
}

static void attach_one_default_qdisc(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_unused)
{
	struct Qdisc *qdisc;

	if (dev->tx_queue_len) {
		qdisc = qdisc_create_dflt(dev, dev_queue,
					  &pfifo_fast_ops, TC_H_ROOT);
		if (!qdisc) {
			printk(KERN_INFO "%s: activation failed\n", dev->name);
			return;
		}
		list_add_tail(&qdisc->list, &dev_queue->qdisc_list);
	} else {
		qdisc = &noqueue_qdisc;
	}
	dev_queue->qdisc_sleeping = qdisc;
}

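/* Publish the sleeping qdisc as the active one for this queue; called
 * via netdev_for_each_tx_queue() from dev_activate() below.
 */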
static void transition_one_qdisc(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_need_watchdog)
{
	int *need_watchdog_p = _need_watchdog;

	spin_lock_bh(&dev_queue->lock);
	rcu_assign_pointer(dev_queue->qdisc, dev_queue->qdisc_sleeping);
	if (dev_queue->qdisc != &noqueue_qdisc)
		*need_watchdog_p = 1;
	spin_unlock_bh(&dev_queue->lock);
}

void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to the device yet; create a
	 * default one (pfifo_fast) for devices which need queueing, and
	 * noqueue_qdisc for virtual interfaces.
	 */

	if (dev_all_qdisc_sleeping_noop(dev))
		netdev_for_each_tx_queue(dev, attach_one_default_qdisc, NULL);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);

	if (need_watchdog) {
		dev->trans_start = jiffies;
		dev_watchdog_up(dev);
	}
}

static void dev_deactivate_queue(struct net_device *dev,
				 struct netdev_queue *dev_queue,
				 void *_qdisc_default)
{
	struct Qdisc *qdisc_default = _qdisc_default;
	struct sk_buff *skb = NULL;
	struct Qdisc *qdisc;

	spin_lock_bh(&dev_queue->lock);

	qdisc = dev_queue->qdisc;
	if (qdisc) {
		dev_queue->qdisc = qdisc_default;
		qdisc_reset(qdisc);

		skb = qdisc->gso_skb;
		qdisc->gso_skb = NULL;
	}

	spin_unlock_bh(&dev_queue->lock);

	kfree_skb(skb);
}

static bool some_qdisc_is_running(struct net_device *dev, int lock)
{
	unsigned int i;

	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *dev_queue;
		int val;

		dev_queue = netdev_get_tx_queue(dev, i);

		if (lock)
			spin_lock_bh(&dev_queue->lock);

		val = test_bit(__QUEUE_STATE_QDISC_RUNNING, &dev_queue->state);

		if (lock)
			spin_unlock_bh(&dev_queue->lock);

		if (val)
			return true;
	}
	return false;
}

void dev_deactivate(struct net_device *dev)
{
	bool running;

	netdev_for_each_tx_queue(dev, dev_deactivate_queue, &noop_qdisc);

	dev_watchdog_down(dev);

	/* Wait for outstanding qdisc-less dev_queue_xmit calls. */
	synchronize_rcu();

	/* Wait for outstanding qdisc_run calls. */
	do {
		while (some_qdisc_is_running(dev, 0))
			yield();

		/*
		 * Double-check inside queue lock to ensure that all effects
		 * of the queue run are visible when we return.
		 */
		running = some_qdisc_is_running(dev, 1);

		/*
		 * The running flag should never be set at this point because
		 * we've already set dev->qdisc to noop_qdisc *inside* the same
		 * pair of spin locks. That is, if any qdisc_run starts after
		 * our initial test it should see the noop_qdisc and then
		 * clear the RUNNING bit before dropping the queue lock. So
		 * if it is set here then we've found a bug.
		 */
	} while (WARN_ON_ONCE(running));
}

static void dev_init_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc)
{
	struct Qdisc *qdisc = _qdisc;

	dev_queue->qdisc = qdisc;
	dev_queue->qdisc_sleeping = qdisc;
	INIT_LIST_HEAD(&dev_queue->qdisc_list);
}

void dev_init_scheduler(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	netdev_for_each_tx_queue(dev, dev_init_scheduler_queue, &noop_qdisc);
	dev_init_scheduler_queue(dev, &dev->rx_queue, NULL);
	qdisc_unlock_tree(dev);

	setup_timer(&dev->watchdog_timer, dev_watchdog, (unsigned long)dev);
}

static void shutdown_scheduler_queue(struct net_device *dev,
				     struct netdev_queue *dev_queue,
				     void *_qdisc_default)
{
	struct Qdisc *qdisc = dev_queue->qdisc_sleeping;
	struct Qdisc *qdisc_default = _qdisc_default;

	if (qdisc) {
		dev_queue->qdisc = qdisc_default;
		dev_queue->qdisc_sleeping = qdisc_default;

		qdisc_destroy(qdisc);
	}
}

void dev_shutdown(struct net_device *dev)
{
	qdisc_lock_tree(dev);
	netdev_for_each_tx_queue(dev, shutdown_scheduler_queue, &noop_qdisc);
	shutdown_scheduler_queue(dev, &dev->rx_queue, NULL);
	BUG_TRAP(!timer_pending(&dev->watchdog_timer));
	qdisc_unlock_tree(dev);
}