/*
 * net/sched/sch_netem.c	Network emulator
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License.
 *
 *		Many of the algorithms and ideas for this came from
 *		NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>

#include <net/pkt_sched.h>

#define VERSION "1.2"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		     Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	----------------------------------------------------------------

	This started out as a simple way to delay outgoing packets to
	test TCP but has grown to include most of the functionality
	of a full blown network emulator like NISTnet. It can delay
	packets and add random jitter (and correlation). The random
	distribution can also be loaded from a table to provide
	normal, Pareto, or experimental curves. Packet loss,
	duplication, and reordering can also be emulated.

	This qdisc does not do classification; that can be handled by
	layering other disciplines. It does not need to do bandwidth
	control either, since that can be handled by using token
	bucket or other rate control.

	The simulator is limited by the Linux timer resolution
	and will create packet bursts on the HZ boundary (1ms).
*/
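
/* Example (userspace side, assuming the iproute2 "tc" front end):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.1%
 *
 * adds 100ms of delay with +/-10ms of jitter (25% correlated) and drops
 * 0.1% of packets.  tc translates this into the netlink options parsed
 * by netem_change() below; the command syntax is a userspace detail.
 */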

struct netem_sched_data {
	struct Qdisc	*qdisc;
	struct qdisc_watchdog watchdog;

	u32 latency;
	u32 loss;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 jitter;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;

	struct crndstate {
		unsigned long last;
		unsigned long rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable {
		u32 size;
		s16 table[0];
	} *delay_dist;
};

/* Time stamp put into socket buffer control block */
struct netem_skb_cb {
	psched_time_t	time_to_send;
};

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = net_random();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static unsigned long get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (state->rho == 0)	/* no correlation */
		return net_random();

	value = net_random();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}

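/* In 32-bit fixed point the update above is
 *
 *	answer = (1 - r) * uniform + r * last,	r = (rho + 1) / 2^32
 *
 * an exponentially weighted blend of fresh entropy and the previous
 * sample: rho == 0 gives plain net_random(), while rho == ~0 repeats
 * the last value almost exactly.
 */
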
/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static long tabledist(unsigned long mu, long sigma,
		      struct crndstate *state, const struct disttable *dist)
{
	long t, x;
	unsigned long rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return (rnd % (2*sigma)) - sigma + mu;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}

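/* The table path computes, with rounding,
 *
 *	result = mu + (t * sigma) / NETEM_DIST_SCALE
 *
 * where t is a signed table sample scaled so that NETEM_DIST_SCALE
 * corresponds to one standard deviation.  sigma is split into its
 * quotient and remainder mod NETEM_DIST_SCALE so that the intermediate
 * products stay within the range of a long.
 */
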
/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
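/*
 * Processing order: maybe duplicate (count++), maybe drop (count--),
 * maybe corrupt one bit, then either stamp the skb with
 * now + tabledist(latency, jitter) and append it to the inner queue,
 * or, when the reordering gap/probability fires, stamp it "now" and
 * requeue it at the head.
 */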
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	int ret;
	int count = 1;

	pr_debug("netem_enqueue skb=%p\n", skb);

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Random packet drop 0 => none, ~0 => all */
	if (q->loss && q->loss >= get_crandom(&q->loss_cor))
		--count;

	if (count == 0) {
		sch->qstats.drops++;
		kfree_skb(skb);
		return NET_XMIT_BYPASS;
	}

	skb_orphan(skb);

	/*
	 * If we need to duplicate the packet, then re-insert at the top of
	 * the qdisc tree, since the parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = sch->dev->qdisc;
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */
		q->duplicate = 0;

		rootq->enqueue(skb2, rootq);
		q->duplicate = dupsave;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying the data.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (!(skb = skb_unshare(skb, GFP_ATOMIC))
		    || (skb->ip_summed == CHECKSUM_PARTIAL
			&& skb_checksum_help(skb))) {
			sch->qstats.drops++;
			return NET_XMIT_DROP;
		}

		/* flip one random bit somewhere in the linear data area */
		skb->data[net_random() % skb_headlen(skb)] ^= 1<<(net_random() % 8);
	}

	cb = (struct netem_skb_cb *)skb->cb;
	if (q->gap == 0			/* not doing reordering */
	    || q->counter < q->gap	/* inside last reordering gap */
	    || q->reorder < get_crandom(&q->reorder_cor)) {
		psched_time_t now;
		psched_tdiff_t delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		PSCHED_GET_TIME(now);
		PSCHED_TADD2(now, delay, cb->time_to_send);
		++q->counter;
		ret = q->qdisc->enqueue(skb, q->qdisc);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		PSCHED_GET_TIME(cb->time_to_send);
		q->counter = 0;
		ret = q->qdisc->ops->requeue(skb, q->qdisc);
	}

	if (likely(ret == NET_XMIT_SUCCESS)) {
		sch->q.qlen++;
		sch->bstats.bytes += skb->len;
		sch->bstats.packets++;
	} else
		sch->qstats.drops++;

	pr_debug("netem: enqueue ret %d\n", ret);
	return ret;
}

/* Requeue packets but don't change time stamp */
static int netem_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if ((ret = q->qdisc->ops->requeue(skb, q->qdisc)) == 0) {
		sch->q.qlen++;
		sch->qstats.requeues++;
	}

	return ret;
}

static unsigned int netem_drop(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned int len = 0;

	if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) {
		sch->q.qlen--;
		sch->qstats.drops++;
	}
	return len;
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	skb = q->qdisc->dequeue(q->qdisc);
	if (skb) {
		const struct netem_skb_cb *cb
			= (const struct netem_skb_cb *)skb->cb;
		psched_time_t now;

		/* has the scheduled send time arrived? */
		PSCHED_GET_TIME(now);

		if (PSCHED_TLESS(cb->time_to_send, now)) {
			pr_debug("netem_dequeue: return skb=%p\n", skb);
			sch->q.qlen--;
			sch->flags &= ~TCQ_F_THROTTLED;
			return skb;
		} else {
			qdisc_watchdog_schedule(&q->watchdog, cb->time_to_send);

			if (q->qdisc->ops->requeue(skb, q->qdisc) != NET_XMIT_SUCCESS) {
				qdisc_tree_decrease_qlen(q->qdisc, 1);
				sch->qstats.drops++;
				printk(KERN_ERR "netem: queuing discipline %s could not requeue\n",
				       q->qdisc->ops->id);
			}
		}
	}

	return NULL;
}

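/* The skb at the head of the inner queue is the one with the earliest
 * time_to_send (the tfifo below keeps that invariant).  If that time is
 * still in the future, the skb is pushed back and the watchdog arms a
 * timer for exactly time_to_send, leaving the qdisc throttled until the
 * emulated delay has elapsed.
 */
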
static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->q.qlen = 0;
	qdisc_watchdog_cancel(&q->watchdog);
}

/* Pass size change message down to embedded FIFO */
static int set_fifo_limit(struct Qdisc *q, int limit)
{
	struct rtattr *rta;
	int ret = -ENOMEM;

	/* Hack to avoid sending change message to non-FIFO */
	if (strncmp(q->ops->id + 1, "fifo", 4) != 0)
		return 0;

	rta = kmalloc(RTA_LENGTH(sizeof(struct tc_fifo_qopt)), GFP_KERNEL);
	if (rta) {
		rta->rta_type = RTM_NEWQDISC;
		rta->rta_len = RTA_LENGTH(sizeof(struct tc_fifo_qopt));
		((struct tc_fifo_qopt *)RTA_DATA(rta))->limit = limit;

		ret = q->ops->change(q, rta);
		kfree(rta);
	}
	return ret;
}

/*
 * Distribution data is a variable-size payload containing
 * signed 16 bit values.
 */
static int get_dist_table(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	unsigned long n = RTA_PAYLOAD(attr)/sizeof(__s16);
	const __s16 *data = RTA_DATA(attr);
	struct disttable *d;
	int i;

	if (n > 65536)
		return -EINVAL;

	d = kmalloc(sizeof(*d) + n*sizeof(d->table[0]), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	spin_lock_bh(&sch->dev->queue_lock);
	d = xchg(&q->delay_dist, d);
	spin_unlock_bh(&sch->dev->queue_lock);

	kfree(d);
	return 0;
}

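/* The kernel only stores the table; producing one is a userspace job
 * (presumably via the distribution tables and maketable helper shipped
 * with iproute2, which turn measured delays into this format; that is
 * an assumption about the usual workflow, not something enforced here).
 */
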
static int get_correlation(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corr *c = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*c))
		return -EINVAL;

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
	return 0;
}

static int get_reorder(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_reorder *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
	return 0;
}

static int get_corrupt(struct Qdisc *sch, const struct rtattr *attr)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	const struct tc_netem_corrupt *r = RTA_DATA(attr);

	if (RTA_PAYLOAD(attr) != sizeof(*r))
		return -EINVAL;

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct tc_netem_qopt *qopt;
	int ret;

	if (opt == NULL || RTA_PAYLOAD(opt) < sizeof(*qopt))
		return -EINVAL;

	qopt = RTA_DATA(opt);
	ret = set_fifo_limit(q->qdisc, qopt->limit);
	if (ret) {
		pr_debug("netem: can't set fifo limit\n");
		return ret;
	}

	q->latency = qopt->latency;
	q->jitter = qopt->jitter;
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* For compatibility with earlier versions:
	 * if gap is set, need to assume 100% probability.
	 */
	q->reorder = ~0;

	/* Handle nested options after the initial queue options.
	 * Should have put all options in nested format, but it is
	 * too late to change now.
	 */
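	/* Wire layout of TCA_OPTIONS for netem, as parsed below:
	 *
	 *	struct tc_netem_qopt		(always first)
	 *	[TCA_NETEM_CORR]	struct tc_netem_corr
	 *	[TCA_NETEM_DELAY_DIST]	array of __s16
	 *	[TCA_NETEM_REORDER]	struct tc_netem_reorder
	 *	[TCA_NETEM_CORRUPT]	struct tc_netem_corrupt
	 *
	 * where the bracketed attributes are optional.
	 */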
	if (RTA_PAYLOAD(opt) > sizeof(*qopt)) {
		struct rtattr *tb[TCA_NETEM_MAX];
		if (rtattr_parse(tb, TCA_NETEM_MAX,
				 RTA_DATA(opt) + sizeof(*qopt),
				 RTA_PAYLOAD(opt) - sizeof(*qopt)))
			return -EINVAL;

		if (tb[TCA_NETEM_CORR-1]) {
			ret = get_correlation(sch, tb[TCA_NETEM_CORR-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_DELAY_DIST-1]) {
			ret = get_dist_table(sch, tb[TCA_NETEM_DELAY_DIST-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_REORDER-1]) {
			ret = get_reorder(sch, tb[TCA_NETEM_REORDER-1]);
			if (ret)
				return ret;
		}

		if (tb[TCA_NETEM_CORRUPT-1]) {
			ret = get_corrupt(sch, tb[TCA_NETEM_CORRUPT-1]);
			if (ret)
				return ret;
		}
	}

	return 0;
}

/*
 * Special-case version of FIFO queue for use by netem.
 * It queues in order based on the timestamps in the skb control blocks.
 */
struct fifo_sched_data {
	u32 limit;
};

static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct sk_buff_head *list = &sch->q;
	const struct netem_skb_cb *ncb
		= (const struct netem_skb_cb *)nskb->cb;
	struct sk_buff *skb;

	if (likely(skb_queue_len(list) < q->limit)) {
		/* scan from the tail for the last skb that is due no
		 * later than the new one, and insert after it */
		skb_queue_reverse_walk(list, skb) {
			const struct netem_skb_cb *cb
				= (const struct netem_skb_cb *)skb->cb;

			if (!PSCHED_TLESS(ncb->time_to_send, cb->time_to_send))
				break;
		}

		__skb_queue_after(list, skb, nskb);

		sch->qstats.backlog += nskb->len;
		sch->bstats.bytes += nskb->len;
		sch->bstats.packets++;

		return NET_XMIT_SUCCESS;
	}

	return qdisc_drop(nskb, sch);
}

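/* Because the scan starts from the tail, an skb whose time_to_send is
 * the newest so far is placed in O(1); the walk only gets long when
 * jitter actually reorders timestamps.
 */
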
static int tfifo_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct fifo_sched_data *q = qdisc_priv(sch);

	if (opt) {
		struct tc_fifo_qopt *ctl = RTA_DATA(opt);
		if (RTA_PAYLOAD(opt) < sizeof(*ctl))
			return -EINVAL;

		q->limit = ctl->limit;
	} else
		q->limit = max_t(u32, sch->dev->tx_queue_len, 1);

	return 0;
}

static int tfifo_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fifo_sched_data *q = qdisc_priv(sch);
	struct tc_fifo_qopt opt = { .limit = q->limit };

	RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
	return skb->len;

rtattr_failure:
	return -1;
}

static struct Qdisc_ops tfifo_qdisc_ops = {
	.id		=	"tfifo",
	.priv_size	=	sizeof(struct fifo_sched_data),
	.enqueue	=	tfifo_enqueue,
	.dequeue	=	qdisc_dequeue_head,
	.requeue	=	qdisc_requeue,
	.drop		=	qdisc_queue_drop,
	.init		=	tfifo_init,
	.reset		=	qdisc_reset_queue,
	.change		=	tfifo_init,
	.dump		=	tfifo_dump,
};

static int netem_init(struct Qdisc *sch, struct rtattr *opt)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	if (!opt)
		return -EINVAL;

	qdisc_watchdog_init(&q->watchdog, sch);

	q->qdisc = qdisc_create_dflt(sch->dev, &tfifo_qdisc_ops,
				     TC_H_MAKE(sch->handle, 1));
	if (!q->qdisc) {
		pr_debug("netem: qdisc create failed\n");
		return -ENOMEM;
	}

	ret = netem_change(sch, opt);
	if (ret) {
		pr_debug("netem: change failed\n");
		qdisc_destroy(q->qdisc);
	}
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	qdisc_destroy(q->qdisc);
	kfree(q->delay_dist);
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct rtattr *rta = (struct rtattr *) b;
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;

	qopt.latency = q->latency;
	qopt.jitter = q->jitter;
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	RTA_PUT(skb, TCA_OPTIONS, sizeof(qopt), &qopt);

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORR, sizeof(cor), &cor);

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	RTA_PUT(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder);

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	RTA_PUT(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt);

	rta->rta_len = skb_tail_pointer(skb) - b;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	sch_tree_lock(sch);
	*old = xchg(&q->qdisc, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_get(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_put(struct Qdisc *sch, unsigned long arg)
{
}

static int netem_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
			      struct rtattr **tca, unsigned long *arg)
{
	return -ENOSYS;
}

static int netem_delete(struct Qdisc *sch, unsigned long arg)
{
	return -ENOSYS;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static struct tcf_proto **netem_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	return NULL;
}

static struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.get		=	netem_get,
	.put		=	netem_put,
	.change		=	netem_change_class,
	.delete		=	netem_delete,
	.walk		=	netem_walk,
	.tcf_chain	=	netem_find_tcf,
	.dump		=	netem_dump_class,
};

709static struct Qdisc_ops netem_qdisc_ops = {
710 .id = "netem",
711 .cl_ops = &netem_class_ops,
712 .priv_size = sizeof(struct netem_sched_data),
713 .enqueue = netem_enqueue,
714 .dequeue = netem_dequeue,
715 .requeue = netem_requeue,
716 .drop = netem_drop,
717 .init = netem_init,
718 .reset = netem_reset,
719 .destroy = netem_destroy,
720 .change = netem_change,
721 .dump = netem_dump,
722 .owner = THIS_MODULE,
723};
724
725
726static int __init netem_module_init(void)
727{
Stephen Hemmingereb229c42005-11-03 13:49:01 -0800728 pr_info("netem: version " VERSION "\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700729 return register_qdisc(&netem_qdisc_ops);
730}
731static void __exit netem_module_exit(void)
732{
733 unregister_qdisc(&netem_qdisc_ops);
734}
735module_init(netem_module_init)
736module_exit(netem_module_exit)
737MODULE_LICENSE("GPL");