// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/*	Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
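
/* Illustrative usage (not part of the original source): this qdisc is
 * normally configured from user space with iproute2's tc, e.g.
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * which requests a 100ms base delay with 10ms jitter (25% correlated)
 * plus 0.3% random packet loss; the option syntax is defined by
 * tc-netem(8), not by this file.
 */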

struct disttable {
	u32  size;
	s16 table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5;	/* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and skb->tstamp,
 * and skb->next & skb->prev are scratch space for a qdisc,
 * we save skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64		time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
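
/* Note (illustrative, not part of the original source): for a non-zero rho
 * the update above is a fixed-point exponentially weighted blend, roughly
 *
 *	answer = U * (1 - rho/2^32) + last * (rho/2^32)
 *
 * where U is a fresh uniform 32-bit random value.  Values of rho close to
 * 2^32 make successive outputs strongly correlated, while small values
 * leave them nearly independent.
 */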

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between a random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for GI model)
		 * Extracts a value from the Markov 4-state loss generator;
		 * if it is 1, drops a packet and if needed writes the event
		 * in the kernel logs
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm
		 * Extracts a value from the Gilbert-Elliot loss generator;
		 * if it is 1, drops a packet and if needed writes the event
		 * in the kernel logs
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}
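
/* Illustrative configuration examples (tc-netem(8) syntax, not part of the
 * original source):
 *
 *	tc qdisc change dev eth0 root netem loss 1% 25%
 *	tc qdisc change dev eth0 root netem loss state 1% 10% 70% 0.1%
 *	tc qdisc change dev eth0 root netem loss gemodel 1% 10% 70% 0.1%
 *
 * selecting CLG_RANDOM (with correlation), the 4-state model and the
 * Gilbert-Elliot model respectively; tc converts the percentages into the
 * a1..a5 transition probabilities used above.
 */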


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
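
/* Worked example (illustrative, not part of the original source):
 * NETEM_DIST_SCALE is 8192, so a loaded table stores offsets in units of
 * sigma/8192.  With mu = 100ms and sigma = 10ms, a table entry t = 8192
 * maps to roughly mu + sigma = 110ms and t = -4096 to roughly
 * mu - sigma/2 = 95ms.  Splitting sigma into (sigma % SCALE) and
 * (sigma / SCALE) keeps the computation in integer arithmetic without
 * losing the sub-scale remainder of sigma.
 */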

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size) /* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
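
/* Worked example (illustrative, not part of the original source): q->rate
 * is in bytes per second (see struct tc_netem_rate).  With
 * rate = 125000 B/s (1 Mbit/s), no overhead and no cell framing, a
 * 1500 byte packet occupies 1500 * NSEC_PER_SEC / 125000 = 12,000,000 ns
 * = 12 ms of link time, which netem_enqueue() adds on top of the
 * configured delay.
 */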

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}

/* netem can't properly corrupt a megapacket (like we get from GSO), so when
 * we statistically choose to corrupt one, we instead segment it, returning
 * the first packet to be corrupted, and re-enqueue the remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying.
	 * If the packet is going to be hardware checksummed, then
	 * do it now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return rc_drop;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Delay the next round with a new future slot with a
 * correct number of bytes and packets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
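
/* Illustrative example (tc-netem(8) syntax, not part of the original
 * source): slotting approximates bursty media such as WiFi or DOCSIS,
 * where packets are accumulated and then released in a burst, e.g.
 *
 *	tc qdisc add dev eth0 root netem slot 800us 1ms bytes 64k packets 42
 *
 * delivers the queued packets in bursts capped at 64 kB or 42 packets,
 * with a uniformly distributed 800us-1ms gap between slots.
 */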

static struct sk_buff *netem_peek(struct netem_sched_data *q)
{
	struct sk_buff *skb = skb_rb_first(&q->t_root);
	u64 t1, t2;

	if (!skb)
		return q->t_head;
	if (!q->t_head)
		return skb;

	t1 = netem_skb_cb(skb)->time_to_send;
	t2 = netem_skb_cb(q->t_head)->time_to_send;
	if (t1 < t2)
		return skb;
	return q->t_head;
}

static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb)
{
	if (skb == q->t_head) {
		q->t_head = skb->next;
		if (!q->t_head)
			q->t_tail = NULL;
	} else {
		rb_erase(&skb->rbnode, &q->t_root);
	}
}

static struct sk_buff *netem_dequeue(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

tfifo_dequeue:
	skb = __qdisc_dequeue_head(&sch->q);
	if (skb) {
		qdisc_qstats_backlog_dec(sch, skb);
deliver:
		qdisc_bstats_update(sch, skb);
		return skb;
	}
	skb = netem_peek(q);
	if (skb) {
		u64 time_to_send;
		u64 now = ktime_get_ns();

		/* if more time remaining? */
		time_to_send = netem_skb_cb(skb)->time_to_send;
		if (q->slot.slot_next && q->slot.slot_next < time_to_send)
			get_slot_next(q, now);

		if (time_to_send <= now && q->slot.slot_next <= now) {
			netem_erase_head(q, skb);
			sch->q.qlen--;
			qdisc_qstats_backlog_dec(sch, skb);
			skb->next = NULL;
			skb->prev = NULL;
			/* skb->dev shares skb->rbnode area,
			 * we need to restore its value.
			 */
			skb->dev = qdisc_dev(sch);

			if (q->slot.slot_next) {
				q->slot.packets_left--;
				q->slot.bytes_left -= qdisc_pkt_len(skb);
				if (q->slot.packets_left <= 0 ||
				    q->slot.bytes_left <= 0)
					get_slot_next(q, now);
			}

			if (q->qdisc) {
				unsigned int pkt_len = qdisc_pkt_len(skb);
				struct sk_buff *to_free = NULL;
				int err;

				err = qdisc_enqueue(skb, q->qdisc, &to_free);
				kfree_skb_list(to_free);
				if (err != NET_XMIT_SUCCESS &&
				    net_xmit_drop_count(err)) {
					qdisc_qstats_drop(sch);
					qdisc_tree_reduce_backlog(sch, 1,
								  pkt_len);
				}
				goto tfifo_dequeue;
			}
			goto deliver;
		}

		if (q->qdisc) {
			skb = q->qdisc->ops->dequeue(q->qdisc);
			if (skb)
				goto deliver;
		}

		qdisc_watchdog_schedule_ns(&q->watchdog,
					   max(time_to_send,
					       q->slot.slot_next));
	}

	if (q->qdisc) {
		skb = q->qdisc->ops->dequeue(q->qdisc);
		if (skb)
			goto deliver;
	}
	return NULL;
}

static void netem_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_reset_queue(sch);
	tfifo_reset(sch);
	if (q->qdisc)
		qdisc_reset(q->qdisc);
	qdisc_watchdog_cancel(&q->watchdog);
}

static void dist_free(struct disttable *d)
{
	kvfree(d);
}

/*
 * Distribution data is a variable size payload containing
 * signed 16 bit values.
 */

static int get_dist_table(struct Qdisc *sch, struct disttable **tbl,
			  const struct nlattr *attr)
{
	size_t n = nla_len(attr)/sizeof(__s16);
	const __s16 *data = nla_data(attr);
	spinlock_t *root_lock;
	struct disttable *d;
	int i;

	if (n > NETEM_DIST_MAX)
		return -EINVAL;

	d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL);
	if (!d)
		return -ENOMEM;

	d->size = n;
	for (i = 0; i < n; i++)
		d->table[i] = data[i];

	root_lock = qdisc_root_sleeping_lock(sch);

	spin_lock_bh(root_lock);
	swap(*tbl, d);
	spin_unlock_bh(root_lock);

	dist_free(d);
	return 0;
}
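
/* Note (illustrative, not part of the original source): the tables loaded
 * here via TCA_NETEM_DELAY_DIST / TCA_NETEM_SLOT_DIST are normally built
 * offline; iproute2 ships precomputed "normal", "pareto" and
 * "paretonormal" tables (selected with e.g.
 * "delay 100ms 10ms distribution normal") and a maketable tool for
 * generating custom tables from measured data.
 */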

static void get_slot(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_slot *c = nla_data(attr);

	q->slot_config = *c;
	if (q->slot_config.max_packets == 0)
		q->slot_config.max_packets = INT_MAX;
	if (q->slot_config.max_bytes == 0)
		q->slot_config.max_bytes = INT_MAX;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter)
		q->slot.slot_next = ktime_get_ns();
	else
		q->slot.slot_next = 0;
}

static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corr *c = nla_data(attr);

	init_crandom(&q->delay_cor, c->delay_corr);
	init_crandom(&q->loss_cor, c->loss_corr);
	init_crandom(&q->dup_cor, c->dup_corr);
}

static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_reorder *r = nla_data(attr);

	q->reorder = r->probability;
	init_crandom(&q->reorder_cor, r->correlation);
}

static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_corrupt *r = nla_data(attr);

	q->corrupt = r->probability;
	init_crandom(&q->corrupt_cor, r->correlation);
}

static void get_rate(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct tc_netem_rate *r = nla_data(attr);

	q->rate = r->rate;
	q->packet_overhead = r->packet_overhead;
	q->cell_size = r->cell_size;
	q->cell_overhead = r->cell_overhead;
	if (q->cell_size)
		q->cell_size_reciprocal = reciprocal_value(q->cell_size);
	else
		q->cell_size_reciprocal = (struct reciprocal_value) { 0 };
}
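
/* Illustrative example (tc-netem(8) syntax, not part of the original
 * source):
 *
 *	tc qdisc change dev eth0 root netem rate 5mbit 20 100 5
 *
 * emulates a 5 Mbit/s link with 20 bytes of per-packet overhead and an
 * ATM-like 100 byte cell size carrying 5 bytes of cell overhead; tc
 * converts the rate into bytes per second before it reaches this function.
 */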

static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr)
{
	const struct nlattr *la;
	int rem;

	nla_for_each_nested(la, attr, rem) {
		u16 type = nla_type(la);

		switch (type) {
		case NETEM_LOSS_GI: {
			const struct tc_netem_gimodel *gi = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gimodel)) {
				pr_info("netem: incorrect gi model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_4_STATES;

			q->clg.state = TX_IN_GAP_PERIOD;
			q->clg.a1 = gi->p13;
			q->clg.a2 = gi->p31;
			q->clg.a3 = gi->p32;
			q->clg.a4 = gi->p14;
			q->clg.a5 = gi->p23;
			break;
		}

		case NETEM_LOSS_GE: {
			const struct tc_netem_gemodel *ge = nla_data(la);

			if (nla_len(la) < sizeof(struct tc_netem_gemodel)) {
				pr_info("netem: incorrect ge model size\n");
				return -EINVAL;
			}

			q->loss_model = CLG_GILB_ELL;
			q->clg.state = GOOD_STATE;
			q->clg.a1 = ge->p;
			q->clg.a2 = ge->r;
			q->clg.a3 = ge->h;
			q->clg.a4 = ge->k1;
			break;
		}

		default:
			pr_info("netem: unknown loss type %u\n", type);
			return -EINVAL;
		}
	}

	return 0;
}

static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = {
	[TCA_NETEM_CORR]	= { .len = sizeof(struct tc_netem_corr) },
	[TCA_NETEM_REORDER]	= { .len = sizeof(struct tc_netem_reorder) },
	[TCA_NETEM_CORRUPT]	= { .len = sizeof(struct tc_netem_corrupt) },
	[TCA_NETEM_RATE]	= { .len = sizeof(struct tc_netem_rate) },
	[TCA_NETEM_LOSS]	= { .type = NLA_NESTED },
	[TCA_NETEM_ECN]		= { .type = NLA_U32 },
	[TCA_NETEM_RATE64]	= { .type = NLA_U64 },
	[TCA_NETEM_LATENCY64]	= { .type = NLA_S64 },
	[TCA_NETEM_JITTER64]	= { .type = NLA_S64 },
	[TCA_NETEM_SLOT]	= { .len = sizeof(struct tc_netem_slot) },
};

static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla,
		      const struct nla_policy *policy, int len)
{
	int nested_len = nla_len(nla) - NLA_ALIGN(len);

	if (nested_len < 0) {
		pr_info("netem: invalid attributes len %d\n", nested_len);
		return -EINVAL;
	}

	if (nested_len >= nla_attr_size(0))
		return nla_parse_deprecated(tb, maxtype,
					    nla_data(nla) + NLA_ALIGN(len),
					    nested_len, policy, NULL);

	memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
	return 0;
}

/* Parse netlink message to set options */
static int netem_change(struct Qdisc *sch, struct nlattr *opt,
			struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_NETEM_MAX + 1];
	struct tc_netem_qopt *qopt;
	struct clgstate old_clg;
	int old_loss_model = CLG_RANDOM;
	int ret;

	if (opt == NULL)
		return -EINVAL;

	qopt = nla_data(opt);
	ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt));
	if (ret < 0)
		return ret;

	/* backup q->clg and q->loss_model */
	old_clg = q->clg;
	old_loss_model = q->loss_model;

	if (tb[TCA_NETEM_LOSS]) {
		ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]);
		if (ret) {
			q->loss_model = old_loss_model;
			return ret;
		}
	} else {
		q->loss_model = CLG_RANDOM;
	}

	if (tb[TCA_NETEM_DELAY_DIST]) {
		ret = get_dist_table(sch, &q->delay_dist,
				     tb[TCA_NETEM_DELAY_DIST]);
		if (ret)
			goto get_table_failure;
	}

	if (tb[TCA_NETEM_SLOT_DIST]) {
		ret = get_dist_table(sch, &q->slot_dist,
				     tb[TCA_NETEM_SLOT_DIST]);
		if (ret)
			goto get_table_failure;
	}

	sch->limit = qopt->limit;

	q->latency = PSCHED_TICKS2NS(qopt->latency);
	q->jitter = PSCHED_TICKS2NS(qopt->jitter);
	q->limit = qopt->limit;
	q->gap = qopt->gap;
	q->counter = 0;
	q->loss = qopt->loss;
	q->duplicate = qopt->duplicate;

	/* for compatibility with earlier versions.
	 * if gap is set, need to assume 100% probability
	 */
	if (q->gap)
		q->reorder = ~0;

	if (tb[TCA_NETEM_CORR])
		get_correlation(q, tb[TCA_NETEM_CORR]);

	if (tb[TCA_NETEM_REORDER])
		get_reorder(q, tb[TCA_NETEM_REORDER]);

	if (tb[TCA_NETEM_CORRUPT])
		get_corrupt(q, tb[TCA_NETEM_CORRUPT]);

	if (tb[TCA_NETEM_RATE])
		get_rate(q, tb[TCA_NETEM_RATE]);

	if (tb[TCA_NETEM_RATE64])
		q->rate = max_t(u64, q->rate,
				nla_get_u64(tb[TCA_NETEM_RATE64]));

	if (tb[TCA_NETEM_LATENCY64])
		q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]);

	if (tb[TCA_NETEM_JITTER64])
		q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]);

	if (tb[TCA_NETEM_ECN])
		q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]);

	if (tb[TCA_NETEM_SLOT])
		get_slot(q, tb[TCA_NETEM_SLOT]);

	return ret;

get_table_failure:
	/* recover clg and loss_model, in case q->clg and
	 * q->loss_model were modified in get_loss_clg()
	 */
	q->clg = old_clg;
	q->loss_model = old_loss_model;
	return ret;
}

static int netem_init(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	int ret;

	qdisc_watchdog_init(&q->watchdog, sch);

	if (!opt)
		return -EINVAL;

	q->loss_model = CLG_RANDOM;
	ret = netem_change(sch, opt, extack);
	if (ret)
		pr_info("netem: change failed\n");
	return ret;
}

static void netem_destroy(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	qdisc_watchdog_cancel(&q->watchdog);
	if (q->qdisc)
		qdisc_put(q->qdisc);
	dist_free(q->delay_dist);
	dist_free(q->slot_dist);
}

static int dump_loss_model(const struct netem_sched_data *q,
			   struct sk_buff *skb)
{
	struct nlattr *nest;

	nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS);
	if (nest == NULL)
		goto nla_put_failure;

	switch (q->loss_model) {
	case CLG_RANDOM:
		/* legacy loss model */
		nla_nest_cancel(skb, nest);
		return 0;	/* no data */

	case CLG_4_STATES: {
		struct tc_netem_gimodel gi = {
			.p13 = q->clg.a1,
			.p31 = q->clg.a2,
			.p32 = q->clg.a3,
			.p14 = q->clg.a4,
			.p23 = q->clg.a5,
		};

		if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi))
			goto nla_put_failure;
		break;
	}
	case CLG_GILB_ELL: {
		struct tc_netem_gemodel ge = {
			.p = q->clg.a1,
			.r = q->clg.a2,
			.h = q->clg.a3,
			.k1 = q->clg.a4,
		};

		if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge))
			goto nla_put_failure;
		break;
	}
	}

	nla_nest_end(skb, nest);
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static int netem_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	const struct netem_sched_data *q = qdisc_priv(sch);
	struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb);
	struct tc_netem_qopt qopt;
	struct tc_netem_corr cor;
	struct tc_netem_reorder reorder;
	struct tc_netem_corrupt corrupt;
	struct tc_netem_rate rate;
	struct tc_netem_slot slot;

	qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency),
			     UINT_MAX);
	qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter),
			    UINT_MAX);
	qopt.limit = q->limit;
	qopt.loss = q->loss;
	qopt.gap = q->gap;
	qopt.duplicate = q->duplicate;
	if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency))
		goto nla_put_failure;

	if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter))
		goto nla_put_failure;

	cor.delay_corr = q->delay_cor.rho;
	cor.loss_corr = q->loss_cor.rho;
	cor.dup_corr = q->dup_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor))
		goto nla_put_failure;

	reorder.probability = q->reorder;
	reorder.correlation = q->reorder_cor.rho;
	if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder))
		goto nla_put_failure;

	corrupt.probability = q->corrupt;
	corrupt.correlation = q->corrupt_cor.rho;
	if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt))
		goto nla_put_failure;

	if (q->rate >= (1ULL << 32)) {
		if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate,
				      TCA_NETEM_PAD))
			goto nla_put_failure;
		rate.rate = ~0U;
	} else {
		rate.rate = q->rate;
	}
	rate.packet_overhead = q->packet_overhead;
	rate.cell_size = q->cell_size;
	rate.cell_overhead = q->cell_overhead;
	if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate))
		goto nla_put_failure;

	if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn))
		goto nla_put_failure;

	if (dump_loss_model(q, skb) != 0)
		goto nla_put_failure;

	if (q->slot_config.min_delay | q->slot_config.max_delay |
	    q->slot_config.dist_jitter) {
		slot = q->slot_config;
		if (slot.max_packets == INT_MAX)
			slot.max_packets = 0;
		if (slot.max_bytes == INT_MAX)
			slot.max_bytes = 0;
		if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot))
			goto nla_put_failure;
	}

	return nla_nest_end(skb, nla);

nla_put_failure:
	nlmsg_trim(skb, nla);
	return -1;
}

static int netem_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	if (cl != 1 || !q->qdisc)	/* only one class */
		return -ENOENT;

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;

	return 0;
}

static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		       struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct netem_sched_data *q = qdisc_priv(sch);

	*old = qdisc_replace(sch, new, &q->qdisc);
	return 0;
}

static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	return q->qdisc;
}

static unsigned long netem_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops netem_class_ops = {
	.graft		=	netem_graft,
	.leaf		=	netem_leaf,
	.find		=	netem_find,
	.walk		=	netem_walk,
	.dump		=	netem_dump_class,
};

static struct Qdisc_ops netem_qdisc_ops __read_mostly = {
	.id		=	"netem",
	.cl_ops		=	&netem_class_ops,
	.priv_size	=	sizeof(struct netem_sched_data),
	.enqueue	=	netem_enqueue,
	.dequeue	=	netem_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	netem_init,
	.reset		=	netem_reset,
	.destroy	=	netem_destroy,
	.change		=	netem_change,
	.dump		=	netem_dump,
	.owner		=	THIS_MODULE,
};


static int __init netem_module_init(void)
{
	pr_info("netem: version " VERSION "\n");
	return register_qdisc(&netem_qdisc_ops);
}
static void __exit netem_module_exit(void)
{
	unregister_qdisc(&netem_qdisc_ops);
}
module_init(netem_module_init)
module_exit(netem_module_exit)
MODULE_LICENSE("GPL");