#ifndef __NET_SCHED_GENERIC_H
#define __NET_SCHED_GENERIC_H

#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/pkt_cls.h>
#include <net/gen_stats.h>
#include <net/rtnetlink.h>

struct Qdisc_ops;
struct qdisc_walker;
struct tcf_walker;
struct module;

struct qdisc_rate_table
{
	struct tc_ratespec rate;
	u32		data[256];
	struct qdisc_rate_table *next;
	int		refcnt;
};

enum qdisc_state_t
{
	__QDISC_STATE_RUNNING,
	__QDISC_STATE_SCHED,
	__QDISC_STATE_DEACTIVATED,
};

struct qdisc_size_table {
	struct list_head	list;
	struct tc_sizespec	szopts;
	int			refcnt;
	u16			data[];
};

struct Qdisc
{
	int 			(*enqueue)(struct sk_buff *skb, struct Qdisc *dev);
	struct sk_buff *	(*dequeue)(struct Qdisc *dev);
	unsigned		flags;
#define TCQ_F_BUILTIN	1
#define TCQ_F_THROTTLED	2
#define TCQ_F_INGRESS	4
	int			padded;
	struct Qdisc_ops	*ops;
	struct qdisc_size_table	*stab;
	u32			handle;
	u32			parent;
	atomic_t		refcnt;
	unsigned long		state;
	struct sk_buff		*gso_skb;
	struct sk_buff_head	requeue;
	struct sk_buff_head	q;
	struct netdev_queue	*dev_queue;
	struct Qdisc		*next_sched;
	struct list_head	list;

	struct gnet_stats_basic	bstats;
	struct gnet_stats_queue	qstats;
	struct gnet_stats_rate_est	rate_est;
	int			(*reshape_fail)(struct sk_buff *skb,
					struct Qdisc *q);

	void			*u32_node;

	/* This field is deprecated, but it is still used by CBQ
	 * and it will live until a better solution is invented.
	 */
	struct Qdisc		*__parent;
};

struct Qdisc_class_ops
{
	/* Child qdisc manipulation */
	int			(*graft)(struct Qdisc *, unsigned long cl,
					struct Qdisc *, struct Qdisc **);
	struct Qdisc *		(*leaf)(struct Qdisc *, unsigned long cl);
	void			(*qlen_notify)(struct Qdisc *, unsigned long);

	/* Class manipulation routines */
	unsigned long		(*get)(struct Qdisc *, u32 classid);
	void			(*put)(struct Qdisc *, unsigned long);
	int			(*change)(struct Qdisc *, u32, u32,
					struct nlattr **, unsigned long *);
	int			(*delete)(struct Qdisc *, unsigned long);
	void			(*walk)(struct Qdisc *, struct qdisc_walker *arg);

	/* Filter manipulation */
	struct tcf_proto **	(*tcf_chain)(struct Qdisc *, unsigned long);
	unsigned long		(*bind_tcf)(struct Qdisc *, unsigned long,
					u32 classid);
	void			(*unbind_tcf)(struct Qdisc *, unsigned long);

	/* rtnetlink specific */
	int			(*dump)(struct Qdisc *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);
	int			(*dump_stats)(struct Qdisc *, unsigned long,
					struct gnet_dump *);
};

struct Qdisc_ops
{
	struct Qdisc_ops	*next;
	const struct Qdisc_class_ops	*cl_ops;
	char			id[IFNAMSIZ];
	int			priv_size;

	int 			(*enqueue)(struct sk_buff *, struct Qdisc *);
	struct sk_buff *	(*dequeue)(struct Qdisc *);
	struct sk_buff *	(*peek)(struct Qdisc *);
	int 			(*requeue)(struct sk_buff *, struct Qdisc *);
	unsigned int		(*drop)(struct Qdisc *);

	int			(*init)(struct Qdisc *, struct nlattr *arg);
	void			(*reset)(struct Qdisc *);
	void			(*destroy)(struct Qdisc *);
	int			(*change)(struct Qdisc *, struct nlattr *arg);

	int			(*dump)(struct Qdisc *, struct sk_buff *);
	int			(*dump_stats)(struct Qdisc *, struct gnet_dump *);

	struct module		*owner;
};


struct tcf_result
{
	unsigned long	class;
	u32		classid;
};

struct tcf_proto_ops
{
	struct tcf_proto_ops	*next;
	char			kind[IFNAMSIZ];

	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	int			(*init)(struct tcf_proto *);
	void			(*destroy)(struct tcf_proto *);

	unsigned long		(*get)(struct tcf_proto *, u32 handle);
	void			(*put)(struct tcf_proto *, unsigned long);
	int			(*change)(struct tcf_proto *, unsigned long,
					u32 handle, struct nlattr **,
					unsigned long *);
	int			(*delete)(struct tcf_proto *, unsigned long);
	void			(*walk)(struct tcf_proto *, struct tcf_walker *arg);

	/* rtnetlink specific */
	int			(*dump)(struct tcf_proto *, unsigned long,
					struct sk_buff *skb, struct tcmsg *);

	struct module		*owner;
};

struct tcf_proto
{
	/* Fast access part */
	struct tcf_proto	*next;
	void			*root;
	int			(*classify)(struct sk_buff *, struct tcf_proto *,
					struct tcf_result *);
	__be16			protocol;

	/* All the rest */
	u32			prio;
	u32			classid;
	struct Qdisc		*q;
	void			*data;
	struct tcf_proto_ops	*ops;
};

struct qdisc_skb_cb {
	unsigned int		pkt_len;
	char			data[];
};

static inline struct qdisc_skb_cb *qdisc_skb_cb(struct sk_buff *skb)
{
	return (struct qdisc_skb_cb *)skb->cb;
}

static inline spinlock_t *qdisc_lock(struct Qdisc *qdisc)
{
	return &qdisc->q.lock;
}

static inline struct Qdisc *qdisc_root(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc;
}

static inline struct Qdisc *qdisc_root_sleeping(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->qdisc_sleeping;
}

/* The qdisc root lock is a mechanism by which the top level
 * of a qdisc tree can be locked from any qdisc node in the
 * forest.  This allows changing the configuration of some
 * aspect of the qdisc tree while blocking out asynchronous
 * qdisc access in the packet processing paths.
 *
 * It is only legal to do this when the root will not change
 * on us.  Otherwise we'll potentially lock the wrong qdisc
 * root.  This is enforced by holding the RTNL semaphore, which
 * all users of this lock accessor must do.
 */
static inline spinlock_t *qdisc_root_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}

static inline spinlock_t *qdisc_root_sleeping_lock(struct Qdisc *qdisc)
{
	struct Qdisc *root = qdisc_root_sleeping(qdisc);

	ASSERT_RTNL();
	return qdisc_lock(root);
}
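
/* A minimal usage sketch (hypothetical caller, not a real call site in
 * this file): a configuration path that already holds the RTNL can
 * serialize against the packet processing paths by taking the root lock
 * around its update:
 *
 *	spinlock_t *root_lock = qdisc_root_lock(q);
 *
 *	spin_lock_bh(root_lock);
 *	... modify qdisc/class state ...
 *	spin_unlock_bh(root_lock);
 *
 * sch_tree_lock()/sch_tree_unlock() below wrap this same pattern around
 * the sleeping root's lock.
 */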

static inline struct net_device *qdisc_dev(struct Qdisc *qdisc)
{
	return qdisc->dev_queue->dev;
}

static inline void sch_tree_lock(struct Qdisc *q)
{
	spin_lock_bh(qdisc_root_sleeping_lock(q));
}

static inline void sch_tree_unlock(struct Qdisc *q)
{
	spin_unlock_bh(qdisc_root_sleeping_lock(q));
}

#define tcf_tree_lock(tp)	sch_tree_lock((tp)->q)
#define tcf_tree_unlock(tp)	sch_tree_unlock((tp)->q)

extern struct Qdisc noop_qdisc;
extern struct Qdisc_ops noop_qdisc_ops;

struct Qdisc_class_common
{
	u32			classid;
	struct hlist_node	hnode;
};

struct Qdisc_class_hash
{
	struct hlist_head	*hash;
	unsigned int		hashsize;
	unsigned int		hashmask;
	unsigned int		hashelems;
};

static inline unsigned int qdisc_class_hash(u32 id, u32 mask)
{
	id ^= id >> 8;
	id ^= id >> 4;
	return id & mask;
}

static inline struct Qdisc_class_common *
qdisc_class_find(struct Qdisc_class_hash *hash, u32 id)
{
	struct Qdisc_class_common *cl;
	struct hlist_node *n;
	unsigned int h;

	h = qdisc_class_hash(id, hash->hashmask);
	hlist_for_each_entry(cl, n, &hash->hash[h], hnode) {
		if (cl->classid == id)
			return cl;
	}
	return NULL;
}
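
/* Illustrative pattern for a classful qdisc's class lookup; foo_class,
 * foo_sched_data, foo_find and the clhash/common members are invented
 * names, not part of this header:
 *
 *	static struct foo_class *foo_find(struct Qdisc *sch, u32 classid)
 *	{
 *		struct foo_sched_data *q = qdisc_priv(sch);
 *		struct Qdisc_class_common *clc;
 *
 *		clc = qdisc_class_find(&q->clhash, classid);
 *		if (clc == NULL)
 *			return NULL;
 *		return container_of(clc, struct foo_class, common);
 *	}
 */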

extern int qdisc_class_hash_init(struct Qdisc_class_hash *);
extern void qdisc_class_hash_insert(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_remove(struct Qdisc_class_hash *, struct Qdisc_class_common *);
extern void qdisc_class_hash_grow(struct Qdisc *, struct Qdisc_class_hash *);
extern void qdisc_class_hash_destroy(struct Qdisc_class_hash *);

extern void dev_init_scheduler(struct net_device *dev);
extern void dev_shutdown(struct net_device *dev);
extern void dev_activate(struct net_device *dev);
extern void dev_deactivate(struct net_device *dev);
extern void qdisc_reset(struct Qdisc *qdisc);
extern void qdisc_destroy(struct Qdisc *qdisc);
extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n);
extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue,
				 struct Qdisc_ops *ops);
extern struct Qdisc *qdisc_create_dflt(struct net_device *dev,
				       struct netdev_queue *dev_queue,
				       struct Qdisc_ops *ops, u32 parentid);
extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
				    struct qdisc_size_table *stab);
extern void tcf_destroy(struct tcf_proto *tp);
extern void tcf_destroy_chain(struct tcf_proto **fl);

/* Reset all TX qdiscs of a device.  */
static inline void qdisc_reset_all_tx(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++)
		qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
}

/* Are all TX queues of the device empty?  */
static inline bool qdisc_all_tx_empty(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		const struct Qdisc *q = txq->qdisc;

		if (q->q.qlen)
			return false;
	}
	return true;
}

/* Are any of the TX qdiscs changing?  */
static inline bool qdisc_tx_changing(struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != txq->qdisc_sleeping)
			return true;
	}
	return false;
}

/* Is the device using the noop qdisc on all queues?  */
static inline bool qdisc_tx_is_noop(const struct net_device *dev)
{
	unsigned int i;
	for (i = 0; i < dev->num_tx_queues; i++) {
		struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
		if (txq->qdisc != &noop_qdisc)
			return false;
	}
	return true;
}

static inline unsigned int qdisc_pkt_len(struct sk_buff *skb)
{
	return qdisc_skb_cb(skb)->pkt_len;
}

/* additional qdisc xmit flags (NET_XMIT_MASK in linux/netdevice.h) */
enum net_xmit_qdisc_t {
	__NET_XMIT_STOLEN = 0x00010000,
	__NET_XMIT_BYPASS = 0x00020000,
};

#ifdef CONFIG_NET_CLS_ACT
#define net_xmit_drop_count(e)	((e) & __NET_XMIT_STOLEN ? 0 : 1)
#else
#define net_xmit_drop_count(e)	(1)
#endif
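
/* Sketch of the intended caller pattern (hypothetical call site): a
 * parent qdisc enqueuing into a child only charges itself a drop when
 * the child did not "steal" the packet for other processing:
 *
 *	ret = qdisc_enqueue(skb, child);
 *	if (ret != NET_XMIT_SUCCESS) {
 *		if (net_xmit_drop_count(ret))
 *			sch->qstats.drops++;
 *		return ret;
 *	}
 */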

static inline int qdisc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
#ifdef CONFIG_NET_SCHED
	if (sch->stab)
		qdisc_calculate_pkt_len(skb, sch->stab);
#endif
	return sch->enqueue(skb, sch);
}

static inline int qdisc_enqueue_root(struct sk_buff *skb, struct Qdisc *sch)
{
	qdisc_skb_cb(skb)->pkt_len = skb->len;
	return qdisc_enqueue(skb, sch) & NET_XMIT_MASK;
}

static inline int __qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
				       struct sk_buff_head *list)
{
	__skb_queue_tail(list, skb);
	sch->qstats.backlog += qdisc_pkt_len(skb);
	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;

	return NET_XMIT_SUCCESS;
}

static inline int qdisc_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch)
{
	return __qdisc_enqueue_tail(skb, sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_head(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_head(struct Qdisc *sch)
{
	return __qdisc_dequeue_head(sch, &sch->q);
}

static inline struct sk_buff *__qdisc_dequeue_tail(struct Qdisc *sch,
						   struct sk_buff_head *list)
{
	struct sk_buff *skb = __skb_dequeue_tail(list);

	if (likely(skb != NULL))
		sch->qstats.backlog -= qdisc_pkt_len(skb);

	return skb;
}

static inline struct sk_buff *qdisc_dequeue_tail(struct Qdisc *sch)
{
	return __qdisc_dequeue_tail(sch, &sch->q);
}

static inline struct sk_buff *qdisc_peek_head(struct Qdisc *sch)
{
	return skb_peek(&sch->q);
}

/* generic pseudo peek method for non-work-conserving qdisc */
static inline struct sk_buff *qdisc_peek_dequeued(struct Qdisc *sch)
{
	/* we can reuse ->gso_skb because peek isn't called for root qdiscs */
	if (!sch->gso_skb)
		sch->gso_skb = sch->dequeue(sch);

	return sch->gso_skb;
}

/* use instead of qdisc->dequeue() for all qdiscs queried with ->peek() */
static inline struct sk_buff *qdisc_dequeue_peeked(struct Qdisc *sch)
{
	struct sk_buff *skb = sch->gso_skb;

	if (skb)
		sch->gso_skb = NULL;
	else
		skb = sch->dequeue(sch);

	return skb;
}
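
/* Sketch of how a non-work-conserving qdisc is expected to combine the
 * two helpers above (shaper_dequeue and tokens_available are invented
 * names for illustration):
 *
 *	static struct sk_buff *shaper_dequeue(struct Qdisc *sch)
 *	{
 *		struct sk_buff *skb = qdisc_peek_dequeued(sch);
 *
 *		if (skb == NULL || !tokens_available(sch, skb))
 *			return NULL;
 *		return qdisc_dequeue_peeked(sch);
 *	}
 *
 * Returning NULL leaves the peeked skb cached in ->gso_skb, so a later
 * dequeue sees the same packet again.
 */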
463
Thomas Graf9972b252005-06-18 22:57:26 -0700464static inline int __qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch,
465 struct sk_buff_head *list)
466{
467 __skb_queue_head(list, skb);
Jussi Kivilinna0abf77e2008-07-20 00:08:27 -0700468 sch->qstats.backlog += qdisc_pkt_len(skb);
Thomas Graf9972b252005-06-18 22:57:26 -0700469 sch->qstats.requeues++;
470
471 return NET_XMIT_SUCCESS;
472}
473
474static inline int qdisc_requeue(struct sk_buff *skb, struct Qdisc *sch)
475{
476 return __qdisc_requeue(skb, sch, &sch->q);
477}
478
479static inline void __qdisc_reset_queue(struct Qdisc *sch,
480 struct sk_buff_head *list)
481{
482 /*
483 * We do not know the backlog in bytes of this list, it
484 * is up to the caller to correct it
485 */
David S. Miller93245dd2008-07-17 04:03:43 -0700486 __skb_queue_purge(list);
Thomas Graf9972b252005-06-18 22:57:26 -0700487}
488
489static inline void qdisc_reset_queue(struct Qdisc *sch)
490{
491 __qdisc_reset_queue(sch, &sch->q);
492 sch->qstats.backlog = 0;
493}
494
495static inline unsigned int __qdisc_queue_drop(struct Qdisc *sch,
496 struct sk_buff_head *list)
497{
498 struct sk_buff *skb = __qdisc_dequeue_tail(sch, list);
499
500 if (likely(skb != NULL)) {
Jussi Kivilinna0abf77e2008-07-20 00:08:27 -0700501 unsigned int len = qdisc_pkt_len(skb);
Thomas Graf9972b252005-06-18 22:57:26 -0700502 kfree_skb(skb);
503 return len;
504 }
505
506 return 0;
507}
508
509static inline unsigned int qdisc_queue_drop(struct Qdisc *sch)
510{
511 return __qdisc_queue_drop(sch, &sch->q);
512}
513
514static inline int qdisc_drop(struct sk_buff *skb, struct Qdisc *sch)
515{
516 kfree_skb(skb);
517 sch->qstats.drops++;
518
519 return NET_XMIT_DROP;
520}
521
522static inline int qdisc_reshape_fail(struct sk_buff *skb, struct Qdisc *sch)
523{
524 sch->qstats.drops++;
525
Patrick McHardyc3bc7cf2007-07-15 00:03:05 -0700526#ifdef CONFIG_NET_CLS_ACT
Thomas Graf9972b252005-06-18 22:57:26 -0700527 if (sch->reshape_fail == NULL || sch->reshape_fail(skb, sch))
528 goto drop;
529
530 return NET_XMIT_SUCCESS;
531
532drop:
533#endif
534 kfree_skb(skb);
535 return NET_XMIT_DROP;
536}
537
Jesper Dangaard Brouere9bef552007-09-12 16:35:24 +0200538/* Length to Time (L2T) lookup in a qdisc_rate_table, to determine how
539 long it will take to send a packet given its size.
540 */
541static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen)
542{
Jesper Dangaard Brouere08b0992007-09-12 16:36:28 +0200543 int slot = pktlen + rtab->rate.cell_align + rtab->rate.overhead;
544 if (slot < 0)
545 slot = 0;
Jesper Dangaard Brouere9bef552007-09-12 16:35:24 +0200546 slot >>= rtab->rate.cell_log;
547 if (slot > 255)
548 return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]);
549 return rtab->data[slot];
550}
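
/* Worked example with illustrative numbers: for cell_log = 3 each table
 * cell covers 8 bytes, so a 100-byte packet with zero cell_align and
 * overhead maps to slot 100 >> 3 = 12 and costs rtab->data[12] time
 * units.  Slots above 255 are approximated from data[255] plus the
 * low-order remainder cell.
 */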

#ifdef CONFIG_NET_CLS_ACT
static inline struct sk_buff *skb_act_clone(struct sk_buff *skb, gfp_t gfp_mask)
{
	struct sk_buff *n = skb_clone(skb, gfp_mask);

	if (n) {
		n->tc_verd = SET_TC_VERD(n->tc_verd, 0);
		n->tc_verd = CLR_TC_OK2MUNGE(n->tc_verd);
		n->tc_verd = CLR_TC_MUNGED(n->tc_verd);
	}
	return n;
}
#endif

#endif