// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <net/flow_dissector.h>

/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows
   but maintains no flow state.  The difference from RED is an additional
   step during enqueue: if the average queue size is over the low threshold
   (qmin), a packet is chosen at random from the queue, and if both the new
   and the chosen packet are from the same flow, both are dropped.  Unlike
   RED, CHOKe is not really a "classful" qdisc because it needs to access
   packets in the queue at random.  It has a minimal class interface to
   allow overriding the builtin flow classifier with filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless Active Queue
   Management Scheme for Approximating Fair Bandwidth Allocation",
   IEEE INFOCOM, 2000.

   A. Tang, J. Wang, and S. Low, "Understanding CHOKe: Throughput and
   Spatial Characteristics", IEEE/ACM Transactions on Networking, 2004.

 */
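
/*
 * A rough sketch (pseudocode only, not the exact control flow) of the
 * per-packet decision implemented by choke_enqueue() below:
 *
 *	qavg = RED average queue size
 *	if (qavg <= qth_min)
 *		no early action, just try to admit the packet
 *	else
 *		pick a packet at random from the queue
 *		if (it belongs to the same flow as the new packet)
 *			drop both packets
 *		else if (qavg > qth_max)
 *			drop (or ECN-mark) the new packet
 *		else
 *			drop (or ECN-mark) the new packet with RED probability
 *	if the packet was not dropped, admit it when qlen < limit,
 *	otherwise tail-drop it
 */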

/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)

struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct red_vars	 vars;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};
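
/*
 * The packet table is a ring buffer whose size is a power of two
 * (tab_mask == size - 1), so head/tail indices wrap with a simple mask.
 * Dropping a packet from the middle of the queue leaves a NULL "hole";
 * the zap helpers below move head/tail past such holes, and choke_len()
 * therefore counts holes as well as packets.
 */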

/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}

/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}

/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}

/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx,
			      struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
	qdisc_drop(skb, sch, to_free);
	--sch->q.qlen;
}

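/*
 * Per-skb scratch data kept in the qdisc control block: flow keys are
 * dissected lazily on first comparison and the digest is cached here, so
 * each packet is dissected at most once (see choke_match_flow()).
 */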
struct choke_skb_cb {
	u16			classid;
	u8			keys_valid;
	struct flow_keys_digest	keys;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}

static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}

/*
 * Compare the flows of two packets.
 * Returns true only if source and destination address and port match;
 * returns false for special cases (e.g. differing protocols).
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	struct flow_keys temp;

	if (skb1->protocol != skb2->protocol)
		return false;

	if (!choke_skb_cb(skb1)->keys_valid) {
		choke_skb_cb(skb1)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb1, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb1)->keys, &temp);
	}

	if (!choke_skb_cb(skb2)->keys_valid) {
		choke_skb_cb(skb2)->keys_valid = 1;
		skb_flow_dissect_flow_keys(skb2, &temp, 0);
		make_flow_keys_digest(&choke_skb_cb(skb2)->keys, &temp);
	}

	return !memcmp(&choke_skb_cb(skb1)->keys,
		       &choke_skb_cb(skb2)->keys,
		       sizeof(choke_skb_cb(skb1)->keys));
}

/*
 * Select a packet at random from the queue.
 * HACK: since the queue can have holes from previous deletions, retry
 * several times to find a random skb; if that fails, give up and return
 * the packet at the head.
 * Will return NULL if the queue is empty (q->head == q->tail).
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + prandom_u32_max(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}

/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	return choke_match_flow(oskb, nskb);
}

static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	const struct red_parms *p = &q->parms;

	choke_skb_cb(skb)->keys_valid = 0;
	/* Compute average queue usage (see RED) */
	q->vars.qavg = red_calc_qavg(p, &q->vars, sch->q.qlen);
	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	/* Is queue small? */
	if (q->vars.qavg <= p->qth_min)
		q->vars.qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx, to_free);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (q->vars.qavg > p->qth_max) {
			q->vars.qcount = -1;

			qdisc_qstats_overlimit(sch);
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++q->vars.qcount) {
			if (red_mark_probability(p, &q->vars, q->vars.qavg)) {
				q->vars.qcount = 0;
				q->vars.qR = red_random(p);

				qdisc_qstats_overlimit(sch);
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			q->vars.qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		qdisc_qstats_backlog_inc(sch, skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

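/* Dequeue the packet at the head of the ring and advance the head past any
 * holes; on an empty queue, start the RED idle period and return NULL.
 */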
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	qdisc_qstats_backlog_dec(sch, skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}

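/* Drop every queued packet, clear the table and restart the RED state. */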
static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	while (q->head != q->tail) {
		struct sk_buff *skb = q->tab[q->head];

		q->head = (q->head + 1) & q->tab_mask;
		if (!skb)
			continue;
		rtnl_qdisc_drop(skb, sch);
	}

	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
	q->head = q->tail = 0;
	red_restart(&q->vars);
}

static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_CHOKE_MAX_P]	= { .type = NLA_U32 },
};

static void choke_free(void *addr)
{
	kvfree(addr);
}

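/*
 * Parse the netlink parameters, resize the packet table when the limit
 * changes (migrating queued packets and dropping any that no longer fit),
 * and update the RED parameters under the qdisc tree lock.
 */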
static int choke_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;
	u32 max_P;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_CHOKE_MAX, opt,
					  choke_policy, NULL);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_CHOKE_MAX_P] ? nla_get_u32(tb[TCA_CHOKE_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog))
		return -EINVAL;

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

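	/* The table size must be a power of two so that head/tail indices
	 * can wrap with a plain "& tab_mask".
	 */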
	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kvmalloc_array((mask + 1), sizeof(struct sk_buff *), GFP_KERNEL | __GFP_ZERO);
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;
			unsigned dropped = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				dropped += qdisc_pkt_len(skb);
				qdisc_qstats_backlog_dec(sch, skb);
				--sch->q.qlen;
				rtnl_qdisc_drop(skb, sch);
			}
			qdisc_tree_reduce_backlog(sch, oqlen - sch->q.qlen, dropped);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]),
		      max_P);
	red_set_vars(&q->vars);

	if (q->head == q->tail)
		red_end_of_idle_period(&q->vars);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}

static int choke_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	return choke_change(sch, opt, extack);
}

static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_CHOKE_MAX_P, q->parms.max_P))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	choke_free(q->tab);
}

static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}

static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),

	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");