/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/flow_keys.h>


/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.


	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive
	traffic into a higher priority band.

	We still need true WFQ for the top-level CSZ, but using WFQ
	for best-effort traffic is absolutely pointless:
	SFQ is superior for this purpose.

	IMPLEMENTATION:
	This implementation limits:
	- maximal queue length per flow to 127 packets,
	- max mtu to 2^18-1,
	- max 65408 flows,
	- number of hash buckets to 65536.

	It is easy to increase these values, but not in flight.  */

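/* Example (illustrative, not part of this file): with the iproute2 'tc'
 * utility, SFQ is typically attached as a leaf qdisc, e.g.
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * which also arms the perturbation timer to rehash flows every 10 seconds.
 */
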
#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
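
/* Worked example (illustrative): for a typical Ethernet quantum of 1514
 * bytes, SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8) = 190 scaled units,
 * and even a maximal 64KB GSO packet costs only 65536 / 8 = 8192 units,
 * which fits comfortably in the signed 16-bit 'allot' field of struct
 * sfq_slot below.
 */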

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers, to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to the slots[]
 * array, while the following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS +
 * SFQ_MAX_DEPTH] are 'pointers' to the dep[] array.
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen;		/* number of skbs in skblist */
	sfq_index	next;		/* next slot in sfq RR chain */
	struct sfq_head	dep;		/* anchor in dep[] chains */
	unsigned short	hash;		/* hash value (index in ht[]) */
	short		allot;		/* credit for this slot */
};

struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	unsigned int	maxflows;	/* number of flows in flows array */
	int		headdrop;
	int		maxdepth;	/* limit of packets per flow */

	u32		perturbation;
	struct tcf_proto *filter_list;
	sfq_index	cur_depth;	/* depth of longest slot */
	unsigned short	scaled_quantum;	/* SFQ_ALLOT_SIZE(quantum) */
	struct sfq_slot	*tail;		/* current slot in round */
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
					/* Linked lists of slots, indexed by depth
					 * dep[0] : list of unused flows
					 * dep[1] : list of flows with 1 packet
					 * dep[X] : list of flows with X packets
					 */

	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
};

/*
 * sfq_head are either in a sfq_slot or in the dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
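
/* For example, with SFQ_MAX_FLOWS == 65408, sfq_dep_head(q, 3) yields
 * &q->slots[3].dep, while sfq_dep_head(q, SFQ_MAX_FLOWS + 1) yields
 * &q->dep[1], the anchor of the list of flows currently holding exactly
 * one packet.
 */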

/*
 * In order to be able to quickly rehash our queue when the timer changes
 * q->perturbation, we store flow_keys in skb->cb[].
 */
struct sfq_skb_cb {
	struct flow_keys	keys;
};

static inline struct sfq_skb_cb *sfq_skb_cb(const struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct sfq_skb_cb));
	return (struct sfq_skb_cb *)qdisc_skb_cb(skb)->data;
}

static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	const struct flow_keys *keys = &sfq_skb_cb(skb)->keys;
	unsigned int hash;

	hash = jhash_3words((__force u32)keys->dst,
			    (__force u32)keys->src ^ keys->ip_proto,
			    (__force u32)keys->ports, q->perturbation);
	return hash & (q->divisor - 1);
}
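
/* Note: q->divisor is enforced to be a power of two (see sfq_change()),
 * so the masking with (q->divisor - 1) above is a cheap modulo; e.g. with
 * the default divisor of 1024 the mask is 0x3ff.
 */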

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list) {
		skb_flow_dissect(skb, &sfq_skb_cb(skb)->keys);
		return sfq_hash(q, skb) + 1;
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
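
/* Return convention for sfq_classify(): 0 means "drop" (with *qerr set;
 * note that TC_ACT_STOLEN/TC_ACT_QUEUED deliberately fall through to the
 * TC_ACT_SHOT case above), while 1 ... divisor selects a bucket; the
 * caller subtracts 1 before indexing ht[].
 */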

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	n = q->slots[x].dep.next;		\
	p = q->slots[x].dep.prev;		\
	sfq_dep_head(q, p)->next = n;		\
	sfq_dep_head(q, n)->prev = p


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
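
/* Invariant maintained by sfq_link()/sfq_unlink(): a slot holding N
 * packets is always linked on the dep[N] list, so dep[0] acts as the
 * free list and dep[q->cur_depth] holds the longest flow(s). sfq_inc()
 * and sfq_dec() simply unlink a slot and relink it at its new depth.
 */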

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}

#define slot_queue_walk(slot, skb)		\
	for (skb = slot->skblist_next;		\
	     skb != (struct sk_buff *)slot;	\
	     skb = skb->next)
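
/* The (struct sk_buff *) casts above rely on skblist_next/skblist_prev
 * being the first two fields of struct sfq_slot, mirroring the next/prev
 * pointers at the start of struct sk_buff: each slot thereby serves as
 * the sentinel node of its own circular skb list.
 */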
308
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop a packet from it:
	 * tail drop by default, head drop if q->headdrop is set.
	 */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		sfq_dec(q, x);
		kfree_skb(skb);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
	}

	if (slot->qlen >= q->maxdepth) {
		struct sk_buff *head;

		if (!q->headdrop)
			return qdisc_drop(skb, sch);

		head = slot_dequeue_head(slot);
		sch->qstats.backlog -= qdisc_pkt_len(head);
		qdisc_drop(head, sch);

		sch->qstats.backlog += qdisc_pkt_len(skb);
		slot_queue_add(slot, skb);
		return NET_XMIT_CN;
	}

	sch->qstats.backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
			q->tail = slot;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	sfq_drop(sch);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen)
		return NET_XMIT_CN;

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_decrease_qlen(sch, 1);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
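
/* Deficit round robin in action (illustrative numbers): with a quantum of
 * 1514 bytes (scaled_quantum == 190), dequeuing a 1514-byte packet costs a
 * flow 190 allot units; once a flow's allot drops to zero or below, it is
 * skipped and recredited with scaled_quantum on the next pass, so each
 * backlogged flow averages one quantum of bytes per round.
 */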

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_decrease_qlen(sch, dropped);
}

static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	q->perturbation = net_random();
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;

	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
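
/* From user space these knobs map onto commands such as (illustrative,
 * assuming an iproute2 'tc' recent enough to speak tc_sfq_qopt_v1):
 *
 *	tc qdisc add dev eth0 root sfq perturb 10 quantum 1514 \
 *		limit 3000 divisor 1024 flows 512 depth 64 headdrop
 *
 * where 'depth' and 'headdrop' travel in the v1 extension parsed above.
 */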

static void *sfq_alloc(size_t sz)
{
	void *ptr = kmalloc(sz, GFP_KERNEL | __GFP_NOWARN);

	if (!ptr)
		ptr = vmalloc(sz);
	return ptr;
}

static void sfq_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}
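
/* This try-kmalloc-then-vmalloc pattern (with __GFP_NOWARN suppressing the
 * allocation-failure splat) is what later kernels expose generically as
 * kvmalloc()/kvfree(); it matters here because large 'divisor' or 'flows'
 * settings make the ht[] and slots[] tables span many pages.
 */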

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	q->perturbation = net_random();

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		sfq_destroy(sch);
		return -ENOMEM;
	}
	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum = q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit = q->limit;
	opt.v0.divisor = q->divisor;
	opt.v0.flows = q->maxflows;
	opt.depth = q->maxdepth;
	opt.headdrop = q->headdrop;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };
	struct sk_buff *skb;

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		slot_queue_walk(slot, skb)
			qs.backlog += qdisc_pkt_len(skb);
	}
	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		= sfq_leaf,
	.get		= sfq_get,
	.put		= sfq_put,
	.tcf_chain	= sfq_find_tcf,
	.bind_tcf	= sfq_bind,
	.unbind_tcf	= sfq_put,
	.dump		= sfq_dump_class,
	.dump_stats	= sfq_dump_class_stats,
	.walk		= sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		= &sfq_class_ops,
	.id		= "sfq",
	.priv_size	= sizeof(struct sfq_sched_data),
	.enqueue	= sfq_enqueue,
	.dequeue	= sfq_dequeue,
	.peek		= qdisc_peek_dequeued,
	.drop		= sfq_drop,
	.init		= sfq_init,
	.reset		= sfq_reset,
	.destroy	= sfq_destroy,
	.change		= NULL,
	.dump		= sfq_dump,
	.owner		= THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");