// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_netem.c	Network emulator
 *
 * Many of the algorithms and ideas for this came from
 * NIST Net which is not copyrighted.
 *
 * Authors:	Stephen Hemminger <shemminger@osdl.org>
 *		Catalin(ux aka Dino) BOIE <catab at umbrella dot ro>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/rtnetlink.h>
#include <linux/reciprocal_div.h>
#include <linux/rbtree.h>

#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>

#define VERSION "1.3"

/* Network Emulation Queuing algorithm.
	====================================

	Sources: [1] Mark Carson, Darrin Santay, "NIST Net - A Linux-based
		 Network Emulation Tool"
		 [2] Luigi Rizzo, DummyNet for FreeBSD

	 ----------------------------------------------------------------

	 This started out as a simple way to delay outgoing packets to
	 test TCP but has grown to include most of the functionality
	 of a full blown network emulator like NISTnet. It can delay
	 packets and add random jitter (and correlation). The random
	 distribution can be loaded from a table as well to provide
	 normal, Pareto, or experimental curves. Packet loss,
	 duplication, and reordering can also be emulated.

	 This qdisc does not do classification; that can be handled by
	 layering other disciplines. It does not need to do bandwidth
	 control either, since that can be handled by using token
	 bucket or other rate control.

	 Correlated Loss Generator models

	 Added generation of correlated loss according to the
	 "Gilbert-Elliot" model, a 4-state Markov model.

	 References:
	 [1] NetemCLG Home http://netgroup.uniroma2.it/NetemCLG
	 [2] S. Salsano, F. Ludovici, A. Ordine, "Definition of a general
	 and intuitive loss model for packet networks and its implementation
	 in the Netem module in the Linux kernel", available in [1]

	 Authors: Stefano Salsano <stefano.salsano at uniroma2.it>
		  Fabio Ludovici <fabio.ludovici at yahoo.it>
*/
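
/* For illustration only (not part of the kernel build): a typical
 * user-space configuration of this qdisc, in standard iproute2 syntax
 * (available options depend on the iproute2 version):
 *
 *	tc qdisc add dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * i.e. delay every packet by 100ms with +/-10ms jitter, 25% correlated
 * with the previous delay, and drop 0.3% of packets.
 */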

struct disttable {
	u32  size;
	s16  table[0];
};

struct netem_sched_data {
	/* internal t(ime)fifo qdisc uses t_root and sch->limit */
	struct rb_root t_root;

	/* a linear queue; reduces rbtree rebalancing when jitter is low */
	struct sk_buff	*t_head;
	struct sk_buff	*t_tail;

	/* optional qdisc for classful handling (NULL at netem init) */
	struct Qdisc	*qdisc;

	struct qdisc_watchdog watchdog;

	s64 latency;
	s64 jitter;

	u32 loss;
	u32 ecn;
	u32 limit;
	u32 counter;
	u32 gap;
	u32 duplicate;
	u32 reorder;
	u32 corrupt;
	u64 rate;
	s32 packet_overhead;
	u32 cell_size;
	struct reciprocal_value cell_size_reciprocal;
	s32 cell_overhead;

	struct crndstate {
		u32 last;
		u32 rho;
	} delay_cor, loss_cor, dup_cor, reorder_cor, corrupt_cor;

	struct disttable *delay_dist;

	enum {
		CLG_RANDOM,
		CLG_4_STATES,
		CLG_GILB_ELL,
	} loss_model;

	enum {
		TX_IN_GAP_PERIOD = 1,
		TX_IN_BURST_PERIOD,
		LOST_IN_GAP_PERIOD,
		LOST_IN_BURST_PERIOD,
	} _4_state_model;

	enum {
		GOOD_STATE = 1,
		BAD_STATE,
	} GE_state_model;

	/* Correlated Loss Generation models */
	struct clgstate {
		/* state of the Markov chain */
		u8 state;

		/* 4-states and Gilbert-Elliot models */
		u32 a1;	/* p13 for 4-states or p for GE */
		u32 a2;	/* p31 for 4-states or r for GE */
		u32 a3;	/* p32 for 4-states or h for GE */
		u32 a4;	/* p14 for 4-states or 1-k for GE */
		u32 a5; /* p23 used only in 4-states */
	} clg;

	struct tc_netem_slot slot_config;
	struct slotstate {
		u64 slot_next;
		s32 packets_left;
		s32 bytes_left;
	} slot;

	struct disttable *slot_dist;
};

/* Time stamp put into socket buffer control block
 * Only valid when skbs are in our internal t(ime)fifo queue.
 *
 * As skb->rbnode uses the same storage as skb->next, skb->prev and
 * skb->tstamp, and skb->next & skb->prev are scratch space for a qdisc,
 * we save the skb->tstamp value in skb->cb[] before destroying it.
 */
struct netem_skb_cb {
	u64	        time_to_send;
};

static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb)
{
	/* we assume we can use skb next/prev/tstamp as storage for rb_node */
	qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb));
	return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data;
}

/* init_crandom - initialize correlated random number generator
 * Use entropy source for initial seed.
 */
static void init_crandom(struct crndstate *state, unsigned long rho)
{
	state->rho = rho;
	state->last = prandom_u32();
}

/* get_crandom - correlated random number generator
 * Next number depends on last value.
 * rho is scaled to avoid floating point.
 */
static u32 get_crandom(struct crndstate *state)
{
	u64 value, rho;
	unsigned long answer;

	if (!state || state->rho == 0)	/* no correlation */
		return prandom_u32();

	value = prandom_u32();
	rho = (u64)state->rho + 1;
	answer = (value * ((1ull<<32) - rho) + state->last * rho) >> 32;
	state->last = answer;
	return answer;
}
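
/* Worked example (illustrative): with rho = 0x40000000 (25% of 2^32),
 * the blend above weights the fresh random value by (2^32 - rho)/2^32
 * ~= 0.75 and the previous output by rho/2^32 ~= 0.25:
 *
 *	answer ~= 0.75 * value + 0.25 * last
 *
 * rho == 0 degenerates to plain prandom_u32(), while rho close to ~0U
 * makes consecutive outputs nearly identical.
 */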

/* loss_4state - 4-state model loss generator
 * Generates losses according to the 4-state Markov chain adopted in
 * the GI (General and Intuitive) loss model.
 */
static bool loss_4state(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;
	u32 rnd = prandom_u32();

	/*
	 * Makes a comparison between rnd and the transition
	 * probabilities outgoing from the current state, then decides the
	 * next state and if the next packet has to be transmitted or lost.
	 * The four states correspond to:
	 *   TX_IN_GAP_PERIOD => successfully transmitted packets within a gap period
	 *   LOST_IN_BURST_PERIOD => isolated losses within a gap period
	 *   LOST_IN_GAP_PERIOD => lost packets within a burst period
	 *   TX_IN_BURST_PERIOD => successfully transmitted packets within a burst period
	 */
	switch (clg->state) {
	case TX_IN_GAP_PERIOD:
		if (rnd < clg->a4) {
			clg->state = LOST_IN_BURST_PERIOD;
			return true;
		} else if (clg->a4 < rnd && rnd < clg->a1 + clg->a4) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else if (clg->a1 + clg->a4 < rnd) {
			clg->state = TX_IN_GAP_PERIOD;
		}

		break;
	case TX_IN_BURST_PERIOD:
		if (rnd < clg->a5) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		} else {
			clg->state = TX_IN_BURST_PERIOD;
		}

		break;
	case LOST_IN_GAP_PERIOD:
		if (rnd < clg->a3)
			clg->state = TX_IN_BURST_PERIOD;
		else if (clg->a3 < rnd && rnd < clg->a2 + clg->a3) {
			clg->state = TX_IN_GAP_PERIOD;
		} else if (clg->a2 + clg->a3 < rnd) {
			clg->state = LOST_IN_GAP_PERIOD;
			return true;
		}
		break;
	case LOST_IN_BURST_PERIOD:
		clg->state = TX_IN_GAP_PERIOD;
		break;
	}

	return false;
}
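
/* Worked example (illustrative): take a1 = p13 = 1% and a4 = p14 = 2%,
 * both expressed as fractions of 2^32, with the chain in
 * TX_IN_GAP_PERIOD.  A fresh rnd in the lowest 2% band produces an
 * isolated loss (LOST_IN_BURST_PERIOD), the next 1% band starts a loss
 * burst (LOST_IN_GAP_PERIOD), and the remaining ~97% keeps transmitting
 * within the gap period.
 */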

/* loss_gilb_ell - Gilbert-Elliot model loss generator
 * Generates losses according to the Gilbert-Elliot loss model or
 * its special cases (Gilbert or Simple Gilbert)
 *
 * Makes a comparison between random number and the transition
 * probabilities outgoing from the current state, then decides the
 * next state. A second random number is extracted and the comparison
 * with the loss probability of the current state decides if the next
 * packet will be transmitted or lost.
 */
static bool loss_gilb_ell(struct netem_sched_data *q)
{
	struct clgstate *clg = &q->clg;

	switch (clg->state) {
	case GOOD_STATE:
		if (prandom_u32() < clg->a1)
			clg->state = BAD_STATE;
		if (prandom_u32() < clg->a4)
			return true;
		break;
	case BAD_STATE:
		if (prandom_u32() < clg->a2)
			clg->state = GOOD_STATE;
		if (prandom_u32() > clg->a3)
			return true;
	}

	return false;
}
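
/* Worked example (illustrative): the "Simple Gilbert" special case uses
 * only p (a1) and r (a2), with h = 0 (a3) so the BAD state nearly always
 * drops and 1-k = 0 (a4) so the GOOD state never does.  With p = 1% and
 * r = 30% of 2^32, loss bursts start after ~100 good packets on average
 * and last 1/0.3 ~= 3.3 packets on average.
 */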

static bool loss_event(struct netem_sched_data *q)
{
	switch (q->loss_model) {
	case CLG_RANDOM:
		/* Random packet drop 0 => none, ~0 => all */
		return q->loss && q->loss >= get_crandom(&q->loss_cor);

	case CLG_4_STATES:
		/* 4-state loss model algorithm (used also for the GI model):
		 * extracts a value from the 4-state Markov loss generator
		 * and drops the packet when a loss is indicated.
		 */
		return loss_4state(q);

	case CLG_GILB_ELL:
		/* Gilbert-Elliot loss model algorithm:
		 * extracts a value from the Gilbert-Elliot loss generator
		 * and drops the packet when a loss is indicated.
		 */
		return loss_gilb_ell(q);
	}

	return false;	/* not reached */
}


/* tabledist - return a pseudo-randomly distributed value with mean mu and
 * std deviation sigma.  Uses table lookup to approximate the desired
 * distribution, and a uniformly-distributed pseudo-random source.
 */
static s64 tabledist(s64 mu, s32 sigma,
		     struct crndstate *state,
		     const struct disttable *dist)
{
	s64 x;
	long t;
	u32 rnd;

	if (sigma == 0)
		return mu;

	rnd = get_crandom(state);

	/* default uniform distribution */
	if (dist == NULL)
		return ((rnd % (2 * sigma)) + mu) - sigma;

	t = dist->table[rnd % dist->size];
	x = (sigma % NETEM_DIST_SCALE) * t;
	if (x >= 0)
		x += NETEM_DIST_SCALE/2;
	else
		x -= NETEM_DIST_SCALE/2;

	return x / NETEM_DIST_SCALE + (sigma / NETEM_DIST_SCALE) * t + mu;
}
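
/* Worked example (illustrative): with mu = 100ms, sigma = 10ms and a
 * loaded table, t = dist->table[rnd % size] samples the distribution
 * in units of NETEM_DIST_SCALE, so the result is approximately
 * mu + t * sigma / NETEM_DIST_SCALE; splitting sigma into its quotient
 * and remainder by NETEM_DIST_SCALE above just preserves integer
 * precision and avoids overflow.
 */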

static u64 packet_time_ns(u64 len, const struct netem_sched_data *q)
{
	len += q->packet_overhead;

	if (q->cell_size) {
		u32 cells = reciprocal_divide(len, q->cell_size_reciprocal);

		if (len > cells * q->cell_size)	/* extra cell needed for remainder */
			cells++;
		len = cells * (q->cell_size + q->cell_overhead);
	}

	return div64_u64(len * NSEC_PER_SEC, q->rate);
}
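
/* Worked example (illustrative): emulating an ATM-like link with
 * cell_size = 48 and cell_overhead = 5, a 100 byte packet (no
 * packet_overhead) occupies ceil(100/48) = 3 cells, i.e.
 * 3 * (48 + 5) = 159 bytes on the wire; at rate = 10^6 bytes/sec this
 * costs 159 * NSEC_PER_SEC / 10^6 = 159 us.
 */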

static void tfifo_reset(struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	struct rb_node *p = rb_first(&q->t_root);

	while (p) {
		struct sk_buff *skb = rb_to_skb(p);

		p = rb_next(p);
		rb_erase(&skb->rbnode, &q->t_root);
		rtnl_kfree_skbs(skb, skb);
	}

	rtnl_kfree_skbs(q->t_head, q->t_tail);
	q->t_head = NULL;
	q->t_tail = NULL;
}

static void tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	u64 tnext = netem_skb_cb(nskb)->time_to_send;

	if (!q->t_tail || tnext >= netem_skb_cb(q->t_tail)->time_to_send) {
		if (q->t_tail)
			q->t_tail->next = nskb;
		else
			q->t_head = nskb;
		q->t_tail = nskb;
	} else {
		struct rb_node **p = &q->t_root.rb_node, *parent = NULL;

		while (*p) {
			struct sk_buff *skb;

			parent = *p;
			skb = rb_to_skb(parent);
			if (tnext >= netem_skb_cb(skb)->time_to_send)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&nskb->rbnode, parent, p);
		rb_insert_color(&nskb->rbnode, &q->t_root);
	}
	sch->q.qlen++;
}
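
/* Design note: with constant delay (zero jitter) packets become ready
 * in arrival order, so the common case above is an O(1) append to the
 * t_head/t_tail list; only a packet whose time_to_send sorts before
 * the current tail falls back to the O(log n) rbtree insertion.
 */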

/* netem can't properly corrupt a megapacket (like we get from GSO), so
 * when we statistically choose to corrupt one, we instead segment it,
 * returning the first packet to be corrupted, and re-enqueue the
 * remaining frames
 */
static struct sk_buff *netem_segment(struct sk_buff *skb, struct Qdisc *sch,
				     struct sk_buff **to_free)
{
	struct sk_buff *segs;
	netdev_features_t features = netif_skb_features(skb);

	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);

	if (IS_ERR_OR_NULL(segs)) {
		qdisc_drop(skb, sch, to_free);
		return NULL;
	}
	consume_skb(skb);
	return segs;
}

/*
 * Insert one skb into qdisc.
 * Note: parent depends on return value to account for queue length.
 *	NET_XMIT_DROP: queue length didn't change.
 *	NET_XMIT_SUCCESS: one skb was queued.
 */
static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			 struct sk_buff **to_free)
{
	struct netem_sched_data *q = qdisc_priv(sch);
	/* We don't fill cb now as skb_unshare() may invalidate it */
	struct netem_skb_cb *cb;
	struct sk_buff *skb2;
	struct sk_buff *segs = NULL;
	unsigned int len = 0, last_len, prev_len = qdisc_pkt_len(skb);
	int nb = 0;
	int count = 1;
	int rc = NET_XMIT_SUCCESS;
	int rc_drop = NET_XMIT_DROP;

	/* Do not fool qdisc_drop_all() */
	skb->prev = NULL;

	/* Random duplication */
	if (q->duplicate && q->duplicate >= get_crandom(&q->dup_cor))
		++count;

	/* Drop packet? */
	if (loss_event(q)) {
		if (q->ecn && INET_ECN_set_ce(skb))
			qdisc_qstats_drop(sch); /* mark packet */
		else
			--count;
	}
	if (count == 0) {
		qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	}

	/* If a delay is expected, orphan the skb. (orphaning usually takes
	 * place at TX completion time, so _before_ the link transit delay)
	 */
	if (q->latency || q->jitter || q->rate)
		skb_orphan_partial(skb);

	/*
	 * If we need to duplicate packet, then re-insert at top of the
	 * qdisc tree, since parent queuer expects that only one
	 * skb will be queued.
	 */
	if (count > 1 && (skb2 = skb_clone(skb, GFP_ATOMIC)) != NULL) {
		struct Qdisc *rootq = qdisc_root(sch);
		u32 dupsave = q->duplicate; /* prevent duplicating a dup... */

		q->duplicate = 0;
		rootq->enqueue(skb2, rootq, to_free);
		q->duplicate = dupsave;
		rc_drop = NET_XMIT_SUCCESS;
	}

	/*
	 * Randomized packet corruption.
	 * Make a copy if needed since we are modifying it.
	 * If the packet is going to be hardware checksummed, then
	 * do the checksum now in software before we mangle it.
	 */
	if (q->corrupt && q->corrupt >= get_crandom(&q->corrupt_cor)) {
		if (skb_is_gso(skb)) {
			segs = netem_segment(skb, sch, to_free);
			if (!segs)
				return rc_drop;
		} else {
			segs = skb;
		}

		skb = segs;
		segs = segs->next;

		skb = skb_unshare(skb, GFP_ATOMIC);
		if (unlikely(!skb)) {
			qdisc_qstats_drop(sch);
			goto finish_segs;
		}
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    skb_checksum_help(skb)) {
			qdisc_drop(skb, sch, to_free);
			goto finish_segs;
		}

		skb->data[prandom_u32() % skb_headlen(skb)] ^=
			1<<(prandom_u32() % 8);
	}

	if (unlikely(sch->q.qlen >= sch->limit)) {
		qdisc_drop_all(skb, sch, to_free);
		return rc_drop;
	}

	qdisc_qstats_backlog_inc(sch, skb);

	cb = netem_skb_cb(skb);
	if (q->gap == 0 ||		/* not doing reordering */
	    q->counter < q->gap - 1 ||	/* inside last reordering gap */
	    q->reorder < get_crandom(&q->reorder_cor)) {
		u64 now;
		s64 delay;

		delay = tabledist(q->latency, q->jitter,
				  &q->delay_cor, q->delay_dist);

		now = ktime_get_ns();

		if (q->rate) {
			struct netem_skb_cb *last = NULL;

			if (sch->q.tail)
				last = netem_skb_cb(sch->q.tail);
			if (q->t_root.rb_node) {
				struct sk_buff *t_skb;
				struct netem_skb_cb *t_last;

				t_skb = skb_rb_last(&q->t_root);
				t_last = netem_skb_cb(t_skb);
				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}
			if (q->t_tail) {
				struct netem_skb_cb *t_last =
					netem_skb_cb(q->t_tail);

				if (!last ||
				    t_last->time_to_send > last->time_to_send)
					last = t_last;
			}

			if (last) {
				/*
				 * Last packet in queue is reference point (now),
				 * calculate this time bonus and subtract
				 * from delay.
				 */
				delay -= last->time_to_send - now;
				delay = max_t(s64, 0, delay);
				now = last->time_to_send;
			}

			delay += packet_time_ns(qdisc_pkt_len(skb), q);
		}

		cb->time_to_send = now + delay;
		++q->counter;
		tfifo_enqueue(skb, sch);
	} else {
		/*
		 * Do re-ordering by putting one out of N packets at the front
		 * of the queue.
		 */
		cb->time_to_send = ktime_get_ns();
		q->counter = 0;

		__qdisc_enqueue_head(skb, &sch->q);
		sch->qstats.requeues++;
	}

finish_segs:
	if (segs) {
		while (segs) {
			skb2 = segs->next;
			skb_mark_not_on_list(segs);
			qdisc_skb_cb(segs)->pkt_len = segs->len;
			last_len = segs->len;
			rc = qdisc_enqueue(segs, sch, to_free);
			if (rc != NET_XMIT_SUCCESS) {
				if (net_xmit_drop_count(rc))
					qdisc_qstats_drop(sch);
			} else {
				nb++;
				len += last_len;
			}
			segs = skb2;
		}
		sch->q.qlen += nb;
		if (nb > 1)
			qdisc_tree_reduce_backlog(sch, 1 - nb, prev_len - len);
	}
	return NET_XMIT_SUCCESS;
}

/* Schedule the start of the next slot at a future time, giving it a
 * fresh budget of bytes and packets.
 */

static void get_slot_next(struct netem_sched_data *q, u64 now)
{
	s64 next_delay;

	if (!q->slot_dist)
		next_delay = q->slot_config.min_delay +
				(prandom_u32() *
				 (q->slot_config.max_delay -
				  q->slot_config.min_delay) >> 32);
	else
		next_delay = tabledist(q->slot_config.dist_delay,
				       (s32)(q->slot_config.dist_jitter),
				       NULL, q->slot_dist);

	q->slot.slot_next = now + next_delay;
	q->slot.packets_left = q->slot_config.max_packets;
	q->slot.bytes_left = q->slot_config.max_bytes;
}
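
/* Worked example (illustrative): (prandom_u32() * range) >> 32 maps a
 * 32-bit random value onto [0, range) without a modulo, so with
 * min_delay = 1ms and max_delay = 3ms the next slot opens after a
 * uniform 1-3ms, carrying fresh max_packets and max_bytes budgets.
 */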
| 640 | |
Peter Oskolkov | d66280b | 2018-12-04 11:55:56 -0800 | [diff] [blame] | 641 | static struct sk_buff *netem_peek(struct netem_sched_data *q) |
| 642 | { |
| 643 | struct sk_buff *skb = skb_rb_first(&q->t_root); |
| 644 | u64 t1, t2; |
| 645 | |
| 646 | if (!skb) |
| 647 | return q->t_head; |
| 648 | if (!q->t_head) |
| 649 | return skb; |
| 650 | |
| 651 | t1 = netem_skb_cb(skb)->time_to_send; |
| 652 | t2 = netem_skb_cb(q->t_head)->time_to_send; |
| 653 | if (t1 < t2) |
| 654 | return skb; |
| 655 | return q->t_head; |
| 656 | } |
| 657 | |
| 658 | static void netem_erase_head(struct netem_sched_data *q, struct sk_buff *skb) |
| 659 | { |
| 660 | if (skb == q->t_head) { |
| 661 | q->t_head = skb->next; |
| 662 | if (!q->t_head) |
| 663 | q->t_tail = NULL; |
| 664 | } else { |
| 665 | rb_erase(&skb->rbnode, &q->t_root); |
| 666 | } |
| 667 | } |
| 668 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 669 | static struct sk_buff *netem_dequeue(struct Qdisc *sch) |
| 670 | { |
| 671 | struct netem_sched_data *q = qdisc_priv(sch); |
| 672 | struct sk_buff *skb; |
| 673 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 674 | tfifo_dequeue: |
Florian Westphal | ed760cb | 2016-09-18 00:57:33 +0200 | [diff] [blame] | 675 | skb = __qdisc_dequeue_head(&sch->q); |
Stephen Hemminger | 771018e | 2005-05-03 16:24:32 -0700 | [diff] [blame] | 676 | if (skb) { |
John Fastabend | 25331d6 | 2014-09-28 11:53:29 -0700 | [diff] [blame] | 677 | qdisc_qstats_backlog_dec(sch, skb); |
Beshay, Joseph | 0ad2a83 | 2015-04-06 18:00:56 +0000 | [diff] [blame] | 678 | deliver: |
Eric Dumazet | aec0a40 | 2013-06-28 07:40:57 -0700 | [diff] [blame] | 679 | qdisc_bstats_update(sch, skb); |
| 680 | return skb; |
| 681 | } |
Peter Oskolkov | d66280b | 2018-12-04 11:55:56 -0800 | [diff] [blame] | 682 | skb = netem_peek(q); |
| 683 | if (skb) { |
Dave Taht | 112f9cb | 2017-11-08 15:12:26 -0800 | [diff] [blame] | 684 | u64 time_to_send; |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 685 | u64 now = ktime_get_ns(); |
Eric Dumazet | 36b7bfe | 2013-07-03 14:04:14 -0700 | [diff] [blame] | 686 | |
Stephen Hemminger | 0f9f32a | 2005-05-26 12:55:01 -0700 | [diff] [blame] | 687 | /* if more time remaining? */ |
Eric Dumazet | 36b7bfe | 2013-07-03 14:04:14 -0700 | [diff] [blame] | 688 | time_to_send = netem_skb_cb(skb)->time_to_send; |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 689 | if (q->slot.slot_next && q->slot.slot_next < time_to_send) |
| 690 | get_slot_next(q, now); |
Eric Dumazet | aec0a40 | 2013-06-28 07:40:57 -0700 | [diff] [blame] | 691 | |
Peter Oskolkov | d66280b | 2018-12-04 11:55:56 -0800 | [diff] [blame] | 692 | if (time_to_send <= now && q->slot.slot_next <= now) { |
| 693 | netem_erase_head(q, skb); |
Eric Dumazet | aec0a40 | 2013-06-28 07:40:57 -0700 | [diff] [blame] | 694 | sch->q.qlen--; |
Beshay, Joseph | 0ad2a83 | 2015-04-06 18:00:56 +0000 | [diff] [blame] | 695 | qdisc_qstats_backlog_dec(sch, skb); |
Eric Dumazet | aec0a40 | 2013-06-28 07:40:57 -0700 | [diff] [blame] | 696 | skb->next = NULL; |
| 697 | skb->prev = NULL; |
Eric Dumazet | bffa72c | 2017-09-19 05:14:24 -0700 | [diff] [blame] | 698 | /* skb->dev shares skb->rbnode area, |
| 699 | * we need to restore its value. |
| 700 | */ |
| 701 | skb->dev = qdisc_dev(sch); |
Jarek Poplawski | 03c05f0 | 2008-10-31 00:46:19 -0700 | [diff] [blame] | 702 | |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 703 | if (q->slot.slot_next) { |
| 704 | q->slot.packets_left--; |
| 705 | q->slot.bytes_left -= qdisc_pkt_len(skb); |
| 706 | if (q->slot.packets_left <= 0 || |
| 707 | q->slot.bytes_left <= 0) |
| 708 | get_slot_next(q, now); |
| 709 | } |
| 710 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 711 | if (q->qdisc) { |
Eric Dumazet | 21de12e | 2016-06-20 15:00:43 -0700 | [diff] [blame] | 712 | unsigned int pkt_len = qdisc_pkt_len(skb); |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 713 | struct sk_buff *to_free = NULL; |
| 714 | int err; |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 715 | |
Eric Dumazet | 520ac30 | 2016-06-21 23:16:49 -0700 | [diff] [blame] | 716 | err = qdisc_enqueue(skb, q->qdisc, &to_free); |
| 717 | kfree_skb_list(to_free); |
Eric Dumazet | 21de12e | 2016-06-20 15:00:43 -0700 | [diff] [blame] | 718 | if (err != NET_XMIT_SUCCESS && |
| 719 | net_xmit_drop_count(err)) { |
| 720 | qdisc_qstats_drop(sch); |
| 721 | qdisc_tree_reduce_backlog(sch, 1, |
| 722 | pkt_len); |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 723 | } |
| 724 | goto tfifo_dequeue; |
| 725 | } |
Eric Dumazet | aec0a40 | 2013-06-28 07:40:57 -0700 | [diff] [blame] | 726 | goto deliver; |
Stephen Hemminger | 0f9f32a | 2005-05-26 12:55:01 -0700 | [diff] [blame] | 727 | } |
Stephen Hemminger | 11274e5 | 2007-03-22 12:17:42 -0700 | [diff] [blame] | 728 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 729 | if (q->qdisc) { |
| 730 | skb = q->qdisc->ops->dequeue(q->qdisc); |
| 731 | if (skb) |
| 732 | goto deliver; |
| 733 | } |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 734 | |
| 735 | qdisc_watchdog_schedule_ns(&q->watchdog, |
| 736 | max(time_to_send, |
| 737 | q->slot.slot_next)); |
Stephen Hemminger | 0f9f32a | 2005-05-26 12:55:01 -0700 | [diff] [blame] | 738 | } |
| 739 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 740 | if (q->qdisc) { |
| 741 | skb = q->qdisc->ops->dequeue(q->qdisc); |
| 742 | if (skb) |
| 743 | goto deliver; |
| 744 | } |
Stephen Hemminger | 0f9f32a | 2005-05-26 12:55:01 -0700 | [diff] [blame] | 745 | return NULL; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 746 | } |
| 747 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 748 | static void netem_reset(struct Qdisc *sch) |
| 749 | { |
| 750 | struct netem_sched_data *q = qdisc_priv(sch); |
| 751 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 752 | qdisc_reset_queue(sch); |
stephen hemminger | ff70405 | 2013-10-06 15:16:49 -0700 | [diff] [blame] | 753 | tfifo_reset(sch); |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 754 | if (q->qdisc) |
| 755 | qdisc_reset(q->qdisc); |
Patrick McHardy | 59cb5c6 | 2007-03-16 01:20:31 -0700 | [diff] [blame] | 756 | qdisc_watchdog_cancel(&q->watchdog); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 757 | } |
| 758 | |
stephen hemminger | 6373a9a | 2011-02-23 13:04:18 +0000 | [diff] [blame] | 759 | static void dist_free(struct disttable *d) |
| 760 | { |
WANG Cong | 4cb2897 | 2014-06-02 15:55:22 -0700 | [diff] [blame] | 761 | kvfree(d); |
stephen hemminger | 6373a9a | 2011-02-23 13:04:18 +0000 | [diff] [blame] | 762 | } |
| 763 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 764 | /* |
| 765 | * Distribution data is a variable size payload containing |
| 766 | * signed 16 bit values. |
| 767 | */ |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 768 | |
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 769 | static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, |
| 770 | const struct nlattr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 771 | { |
stephen hemminger | 6373a9a | 2011-02-23 13:04:18 +0000 | [diff] [blame] | 772 | size_t n = nla_len(attr)/sizeof(__s16); |
Patrick McHardy | 1e90474 | 2008-01-22 22:11:17 -0800 | [diff] [blame] | 773 | const __s16 *data = nla_data(attr); |
David S. Miller | 7698b4f | 2008-07-16 01:42:40 -0700 | [diff] [blame] | 774 | spinlock_t *root_lock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 775 | struct disttable *d; |
| 776 | int i; |
| 777 | |
stephen hemminger | df173bd | 2011-02-23 13:04:19 +0000 | [diff] [blame] | 778 | if (n > NETEM_DIST_MAX) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 779 | return -EINVAL; |
| 780 | |
Michal Hocko | 752ade6 | 2017-05-08 15:57:27 -0700 | [diff] [blame] | 781 | d = kvmalloc(sizeof(struct disttable) + n * sizeof(s16), GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 782 | if (!d) |
| 783 | return -ENOMEM; |
| 784 | |
| 785 | d->size = n; |
| 786 | for (i = 0; i < n; i++) |
| 787 | d->table[i] = data[i]; |
YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 788 | |
Jarek Poplawski | 102396a | 2008-08-29 14:21:52 -0700 | [diff] [blame] | 789 | root_lock = qdisc_root_sleeping_lock(sch); |
David S. Miller | 7698b4f | 2008-07-16 01:42:40 -0700 | [diff] [blame] | 790 | |
| 791 | spin_lock_bh(root_lock); |
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 792 | swap(*tbl, d); |
David S. Miller | 7698b4f | 2008-07-16 01:42:40 -0700 | [diff] [blame] | 793 | spin_unlock_bh(root_lock); |
Eric Dumazet | bb52c7a | 2011-12-23 19:28:51 +0000 | [diff] [blame] | 794 | |
| 795 | dist_free(d); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 796 | return 0; |
| 797 | } |
| 798 | |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 799 | static void get_slot(struct netem_sched_data *q, const struct nlattr *attr) |
| 800 | { |
| 801 | const struct tc_netem_slot *c = nla_data(attr); |
| 802 | |
| 803 | q->slot_config = *c; |
| 804 | if (q->slot_config.max_packets == 0) |
| 805 | q->slot_config.max_packets = INT_MAX; |
| 806 | if (q->slot_config.max_bytes == 0) |
| 807 | q->slot_config.max_bytes = INT_MAX; |
| 808 | q->slot.packets_left = q->slot_config.max_packets; |
| 809 | q->slot.bytes_left = q->slot_config.max_bytes; |
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 810 | if (q->slot_config.min_delay | q->slot_config.max_delay | |
| 811 | q->slot_config.dist_jitter) |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 812 | q->slot.slot_next = ktime_get_ns(); |
| 813 | else |
| 814 | q->slot.slot_next = 0; |
| 815 | } |
| 816 | |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 817 | static void get_correlation(struct netem_sched_data *q, const struct nlattr *attr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 818 | { |
Patrick McHardy | 1e90474 | 2008-01-22 22:11:17 -0800 | [diff] [blame] | 819 | const struct tc_netem_corr *c = nla_data(attr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 820 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 821 | init_crandom(&q->delay_cor, c->delay_corr); |
| 822 | init_crandom(&q->loss_cor, c->loss_corr); |
| 823 | init_crandom(&q->dup_cor, c->dup_corr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 824 | } |
| 825 | |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 826 | static void get_reorder(struct netem_sched_data *q, const struct nlattr *attr) |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 827 | { |
Patrick McHardy | 1e90474 | 2008-01-22 22:11:17 -0800 | [diff] [blame] | 828 | const struct tc_netem_reorder *r = nla_data(attr); |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 829 | |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 830 | q->reorder = r->probability; |
| 831 | init_crandom(&q->reorder_cor, r->correlation); |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 832 | } |
| 833 | |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 834 | static void get_corrupt(struct netem_sched_data *q, const struct nlattr *attr) |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 835 | { |
Patrick McHardy | 1e90474 | 2008-01-22 22:11:17 -0800 | [diff] [blame] | 836 | const struct tc_netem_corrupt *r = nla_data(attr); |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 837 | |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 838 | q->corrupt = r->probability; |
| 839 | init_crandom(&q->corrupt_cor, r->correlation); |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 840 | } |
| 841 | |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 842 | static void get_rate(struct netem_sched_data *q, const struct nlattr *attr) |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 843 | { |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 844 | const struct tc_netem_rate *r = nla_data(attr); |
| 845 | |
| 846 | q->rate = r->rate; |
Hagen Paul Pfeifer | 90b41a1 | 2011-12-12 14:30:00 +0000 | [diff] [blame] | 847 | q->packet_overhead = r->packet_overhead; |
| 848 | q->cell_size = r->cell_size; |
Hannes Frederic Sowa | 809fa97 | 2014-01-22 02:29:41 +0100 | [diff] [blame] | 849 | q->cell_overhead = r->cell_overhead; |
Hagen Paul Pfeifer | 90b41a1 | 2011-12-12 14:30:00 +0000 | [diff] [blame] | 850 | if (q->cell_size) |
| 851 | q->cell_size_reciprocal = reciprocal_value(q->cell_size); |
Hannes Frederic Sowa | 809fa97 | 2014-01-22 02:29:41 +0100 | [diff] [blame] | 852 | else |
| 853 | q->cell_size_reciprocal = (struct reciprocal_value) { 0 }; |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 854 | } |
| 855 | |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 856 | static int get_loss_clg(struct netem_sched_data *q, const struct nlattr *attr) |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 857 | { |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 858 | const struct nlattr *la; |
| 859 | int rem; |
| 860 | |
| 861 | nla_for_each_nested(la, attr, rem) { |
| 862 | u16 type = nla_type(la); |
| 863 | |
Yang Yingliang | 833fa74 | 2013-12-10 20:55:32 +0800 | [diff] [blame] | 864 | switch (type) { |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 865 | case NETEM_LOSS_GI: { |
| 866 | const struct tc_netem_gimodel *gi = nla_data(la); |
| 867 | |
stephen hemminger | 2494654 | 2011-12-23 09:16:30 +0000 | [diff] [blame] | 868 | if (nla_len(la) < sizeof(struct tc_netem_gimodel)) { |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 869 | pr_info("netem: incorrect gi model size\n"); |
| 870 | return -EINVAL; |
| 871 | } |
| 872 | |
| 873 | q->loss_model = CLG_4_STATES; |
| 874 | |
Yang Yingliang | 3fbac2a | 2014-02-17 16:48:21 +0800 | [diff] [blame] | 875 | q->clg.state = TX_IN_GAP_PERIOD; |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 876 | q->clg.a1 = gi->p13; |
| 877 | q->clg.a2 = gi->p31; |
| 878 | q->clg.a3 = gi->p32; |
| 879 | q->clg.a4 = gi->p14; |
| 880 | q->clg.a5 = gi->p23; |
| 881 | break; |
| 882 | } |
| 883 | |
| 884 | case NETEM_LOSS_GE: { |
| 885 | const struct tc_netem_gemodel *ge = nla_data(la); |
| 886 | |
stephen hemminger | 2494654 | 2011-12-23 09:16:30 +0000 | [diff] [blame] | 887 | if (nla_len(la) < sizeof(struct tc_netem_gemodel)) { |
| 888 | pr_info("netem: incorrect ge model size\n"); |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 889 | return -EINVAL; |
| 890 | } |
| 891 | |
| 892 | q->loss_model = CLG_GILB_ELL; |
Yang Yingliang | 3fbac2a | 2014-02-17 16:48:21 +0800 | [diff] [blame] | 893 | q->clg.state = GOOD_STATE; |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 894 | q->clg.a1 = ge->p; |
| 895 | q->clg.a2 = ge->r; |
| 896 | q->clg.a3 = ge->h; |
| 897 | q->clg.a4 = ge->k1; |
| 898 | break; |
| 899 | } |
| 900 | |
| 901 | default: |
| 902 | pr_info("netem: unknown loss type %u\n", type); |
| 903 | return -EINVAL; |
| 904 | } |
| 905 | } |
| 906 | |
| 907 | return 0; |
| 908 | } |
| 909 | |
Patrick McHardy | 27a3421 | 2008-01-23 20:35:39 -0800 | [diff] [blame] | 910 | static const struct nla_policy netem_policy[TCA_NETEM_MAX + 1] = { |
| 911 | [TCA_NETEM_CORR] = { .len = sizeof(struct tc_netem_corr) }, |
| 912 | [TCA_NETEM_REORDER] = { .len = sizeof(struct tc_netem_reorder) }, |
| 913 | [TCA_NETEM_CORRUPT] = { .len = sizeof(struct tc_netem_corrupt) }, |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 914 | [TCA_NETEM_RATE] = { .len = sizeof(struct tc_netem_rate) }, |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 915 | [TCA_NETEM_LOSS] = { .type = NLA_NESTED }, |
Eric Dumazet | e4ae004 | 2012-04-30 23:11:05 +0000 | [diff] [blame] | 916 | [TCA_NETEM_ECN] = { .type = NLA_U32 }, |
Yang Yingliang | 6a031f6 | 2013-12-25 17:35:15 +0800 | [diff] [blame] | 917 | [TCA_NETEM_RATE64] = { .type = NLA_U64 }, |
Dave Taht | 9980317 | 2017-11-08 15:12:27 -0800 | [diff] [blame] | 918 | [TCA_NETEM_LATENCY64] = { .type = NLA_S64 }, |
| 919 | [TCA_NETEM_JITTER64] = { .type = NLA_S64 }, |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 920 | [TCA_NETEM_SLOT] = { .len = sizeof(struct tc_netem_slot) }, |
Patrick McHardy | 27a3421 | 2008-01-23 20:35:39 -0800 | [diff] [blame] | 921 | }; |
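
/* For the fixed-size entries above, .len acts as a minimum payload
 * length: the netlink policy check rejects shorter attributes but
 * tolerates longer ones, which lets newer userspace extend these
 * structures without breaking older kernels.
 */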
| 922 | |
Thomas Graf | 2c10b32 | 2008-09-02 17:30:27 -0700 | [diff] [blame] | 923 | static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, |
| 924 | const struct nla_policy *policy, int len) |
| 925 | { |
| 926 | int nested_len = nla_len(nla) - NLA_ALIGN(len); |
| 927 | |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 928 | if (nested_len < 0) { |
| 929 | pr_info("netem: invalid attributes len %d\n", nested_len); |
Thomas Graf | 2c10b32 | 2008-09-02 17:30:27 -0700 | [diff] [blame] | 930 | return -EINVAL; |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 931 | } |
| 932 | |
Thomas Graf | 2c10b32 | 2008-09-02 17:30:27 -0700 | [diff] [blame] | 933 | if (nested_len >= nla_attr_size(0)) |
Johannes Berg | 8cb0817 | 2019-04-26 14:07:28 +0200 | [diff] [blame] | 934 | return nla_parse_deprecated(tb, maxtype, |
| 935 | nla_data(nla) + NLA_ALIGN(len), |
| 936 | nested_len, policy, NULL); |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 937 | |
Thomas Graf | 2c10b32 | 2008-09-02 17:30:27 -0700 | [diff] [blame] | 938 | memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); |
| 939 | return 0; |
| 940 | } |
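
/* The TCA_OPTIONS payload for netem is a fixed struct tc_netem_qopt
 * followed, NLA_ALIGN()ed, by the optional nested attributes:
 *
 *   +----------------------+----------------+----------------+----
 *   | struct tc_netem_qopt | TCA_NETEM_CORR | TCA_NETEM_RATE | ...
 *   +----------------------+----------------+----------------+----
 *
 * parse_attr() steps over the fixed struct and hands the remainder to
 * the regular netlink attribute parser.
 */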
| 941 | |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 942 | /* Parse netlink message to set options */ |
Alexander Aring | 2030721 | 2017-12-20 12:35:14 -0500 | [diff] [blame] | 943 | static int netem_change(struct Qdisc *sch, struct nlattr *opt, |
| 944 | struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 945 | { |
| 946 | struct netem_sched_data *q = qdisc_priv(sch); |
Patrick McHardy | b03f467 | 2008-01-23 20:32:21 -0800 | [diff] [blame] | 947 | struct nlattr *tb[TCA_NETEM_MAX + 1]; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 948 | struct tc_netem_qopt *qopt; |
Yang Yingliang | 54a4b05 | 2014-02-14 10:30:41 +0800 | [diff] [blame] | 949 | struct clgstate old_clg; |
| 950 | int old_loss_model = CLG_RANDOM; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 951 | int ret; |
YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 952 | |
Patrick McHardy | b03f467 | 2008-01-23 20:32:21 -0800 | [diff] [blame] | 953 | if (opt == NULL) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 954 | return -EINVAL; |
| 955 | |
Thomas Graf | 2c10b32 | 2008-09-02 17:30:27 -0700 | [diff] [blame] | 956 | qopt = nla_data(opt); |
| 957 | ret = parse_attr(tb, TCA_NETEM_MAX, opt, netem_policy, sizeof(*qopt)); |
Patrick McHardy | b03f467 | 2008-01-23 20:32:21 -0800 | [diff] [blame] | 958 | if (ret < 0) |
| 959 | return ret; |
| 960 | |
	/* Back up q->clg and q->loss_model. */
| 962 | old_clg = q->clg; |
| 963 | old_loss_model = q->loss_model; |
| 964 | |
| 965 | if (tb[TCA_NETEM_LOSS]) { |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 966 | ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); |
Yang Yingliang | 54a4b05 | 2014-02-14 10:30:41 +0800 | [diff] [blame] | 967 | if (ret) { |
| 968 | q->loss_model = old_loss_model; |
| 969 | return ret; |
| 970 | } |
| 971 | } else { |
| 972 | q->loss_model = CLG_RANDOM; |
| 973 | } |
| 974 | |
| 975 | if (tb[TCA_NETEM_DELAY_DIST]) { |
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 976 | ret = get_dist_table(sch, &q->delay_dist, |
| 977 | tb[TCA_NETEM_DELAY_DIST]); |
| 978 | if (ret) |
| 979 | goto get_table_failure; |
| 980 | } |
| 981 | |
| 982 | if (tb[TCA_NETEM_SLOT_DIST]) { |
| 983 | ret = get_dist_table(sch, &q->slot_dist, |
| 984 | tb[TCA_NETEM_SLOT_DIST]); |
| 985 | if (ret) |
| 986 | goto get_table_failure; |
Yang Yingliang | 54a4b05 | 2014-02-14 10:30:41 +0800 | [diff] [blame] | 987 | } |
| 988 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 989 | sch->limit = qopt->limit; |
YOSHIFUJI Hideaki | 10297b9 | 2007-02-09 23:25:16 +0900 | [diff] [blame] | 990 | |
Dave Taht | 112f9cb | 2017-11-08 15:12:26 -0800 | [diff] [blame] | 991 | q->latency = PSCHED_TICKS2NS(qopt->latency); |
| 992 | q->jitter = PSCHED_TICKS2NS(qopt->jitter); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 993 | q->limit = qopt->limit; |
| 994 | q->gap = qopt->gap; |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 995 | q->counter = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 996 | q->loss = qopt->loss; |
| 997 | q->duplicate = qopt->duplicate; |
| 998 | |
	/* For compatibility with earlier versions: if a reorder gap is set,
	 * assume 100% reorder probability (~0, since probabilities are
	 * scaled so that UINT_MAX means 100%).
	 */
Stephen Hemminger | a362e0a | 2007-03-22 12:15:45 -0700 | [diff] [blame] | 1002 | if (q->gap) |
| 1003 | q->reorder = ~0; |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 1004 | |
Stephen Hemminger | 265eb67 | 2008-11-03 21:13:26 -0800 | [diff] [blame] | 1005 | if (tb[TCA_NETEM_CORR]) |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 1006 | get_correlation(q, tb[TCA_NETEM_CORR]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1007 | |
Stephen Hemminger | 265eb67 | 2008-11-03 21:13:26 -0800 | [diff] [blame] | 1008 | if (tb[TCA_NETEM_REORDER]) |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 1009 | get_reorder(q, tb[TCA_NETEM_REORDER]); |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 1010 | |
Stephen Hemminger | 265eb67 | 2008-11-03 21:13:26 -0800 | [diff] [blame] | 1011 | if (tb[TCA_NETEM_CORRUPT]) |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 1012 | get_corrupt(q, tb[TCA_NETEM_CORRUPT]); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1013 | |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 1014 | if (tb[TCA_NETEM_RATE]) |
Yang Yingliang | 49545a7 | 2014-02-14 10:30:42 +0800 | [diff] [blame] | 1015 | get_rate(q, tb[TCA_NETEM_RATE]); |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 1016 | |
Yang Yingliang | 6a031f6 | 2013-12-25 17:35:15 +0800 | [diff] [blame] | 1017 | if (tb[TCA_NETEM_RATE64]) |
| 1018 | q->rate = max_t(u64, q->rate, |
| 1019 | nla_get_u64(tb[TCA_NETEM_RATE64])); |
| 1020 | |
Dave Taht | 9980317 | 2017-11-08 15:12:27 -0800 | [diff] [blame] | 1021 | if (tb[TCA_NETEM_LATENCY64]) |
| 1022 | q->latency = nla_get_s64(tb[TCA_NETEM_LATENCY64]); |
| 1023 | |
| 1024 | if (tb[TCA_NETEM_JITTER64]) |
| 1025 | q->jitter = nla_get_s64(tb[TCA_NETEM_JITTER64]); |
| 1026 | |
Eric Dumazet | e4ae004 | 2012-04-30 23:11:05 +0000 | [diff] [blame] | 1027 | if (tb[TCA_NETEM_ECN]) |
| 1028 | q->ecn = nla_get_u32(tb[TCA_NETEM_ECN]); |
| 1029 | |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 1030 | if (tb[TCA_NETEM_SLOT]) |
| 1031 | get_slot(q, tb[TCA_NETEM_SLOT]); |
| 1032 | |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1033 | return ret; |
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 1034 | |
| 1035 | get_table_failure: |
	/* Restore clg and loss_model in case they were modified
	 * by get_loss_clg().
	 */
| 1040 | q->clg = old_clg; |
| 1041 | q->loss_model = old_loss_model; |
| 1042 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1043 | } |
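
/* Illustrative reconfiguration from userspace (interface and values are
 * examples only):
 *
 *   tc qdisc change dev eth0 root netem delay 100ms 10ms 25% loss 0.3%
 *
 * tc packs these settings into tc_netem_qopt plus the nested attributes
 * parsed above.
 */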
| 1044 | |
Alexander Aring | e63d7df | 2017-12-20 12:35:13 -0500 | [diff] [blame] | 1045 | static int netem_init(struct Qdisc *sch, struct nlattr *opt, |
| 1046 | struct netlink_ext_ack *extack) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1047 | { |
| 1048 | struct netem_sched_data *q = qdisc_priv(sch); |
| 1049 | int ret; |
| 1050 | |
Nikolay Aleksandrov | 634576a | 2017-08-30 12:49:03 +0300 | [diff] [blame] | 1051 | qdisc_watchdog_init(&q->watchdog, sch); |
| 1052 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1053 | if (!opt) |
| 1054 | return -EINVAL; |
| 1055 | |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1056 | q->loss_model = CLG_RANDOM; |
Alexander Aring | 2030721 | 2017-12-20 12:35:14 -0500 | [diff] [blame] | 1057 | ret = netem_change(sch, opt, extack); |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 1058 | if (ret) |
stephen hemminger | 250a65f | 2011-02-23 13:04:22 +0000 | [diff] [blame] | 1059 | pr_info("netem: change failed\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1060 | return ret; |
| 1061 | } |
| 1062 | |
| 1063 | static void netem_destroy(struct Qdisc *sch) |
| 1064 | { |
| 1065 | struct netem_sched_data *q = qdisc_priv(sch); |
| 1066 | |
Patrick McHardy | 59cb5c6 | 2007-03-16 01:20:31 -0700 | [diff] [blame] | 1067 | qdisc_watchdog_cancel(&q->watchdog); |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 1068 | if (q->qdisc) |
Vlad Buslov | 86bd446 | 2018-09-24 19:22:50 +0300 | [diff] [blame] | 1069 | qdisc_put(q->qdisc); |
stephen hemminger | 6373a9a | 2011-02-23 13:04:18 +0000 | [diff] [blame] | 1070 | dist_free(q->delay_dist); |
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 1071 | dist_free(q->slot_dist); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1072 | } |
| 1073 | |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1074 | static int dump_loss_model(const struct netem_sched_data *q, |
| 1075 | struct sk_buff *skb) |
| 1076 | { |
| 1077 | struct nlattr *nest; |
| 1078 | |
Michal Kubecek | ae0be8d | 2019-04-26 11:13:06 +0200 | [diff] [blame] | 1079 | nest = nla_nest_start_noflag(skb, TCA_NETEM_LOSS); |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1080 | if (nest == NULL) |
| 1081 | goto nla_put_failure; |
| 1082 | |
| 1083 | switch (q->loss_model) { |
| 1084 | case CLG_RANDOM: |
| 1085 | /* legacy loss model */ |
| 1086 | nla_nest_cancel(skb, nest); |
| 1087 | return 0; /* no data */ |
| 1088 | |
| 1089 | case CLG_4_STATES: { |
| 1090 | struct tc_netem_gimodel gi = { |
| 1091 | .p13 = q->clg.a1, |
| 1092 | .p31 = q->clg.a2, |
| 1093 | .p32 = q->clg.a3, |
| 1094 | .p14 = q->clg.a4, |
| 1095 | .p23 = q->clg.a5, |
| 1096 | }; |
| 1097 | |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1098 | if (nla_put(skb, NETEM_LOSS_GI, sizeof(gi), &gi)) |
| 1099 | goto nla_put_failure; |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1100 | break; |
| 1101 | } |
| 1102 | case CLG_GILB_ELL: { |
| 1103 | struct tc_netem_gemodel ge = { |
| 1104 | .p = q->clg.a1, |
| 1105 | .r = q->clg.a2, |
| 1106 | .h = q->clg.a3, |
| 1107 | .k1 = q->clg.a4, |
| 1108 | }; |
| 1109 | |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1110 | if (nla_put(skb, NETEM_LOSS_GE, sizeof(ge), &ge)) |
| 1111 | goto nla_put_failure; |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1112 | break; |
| 1113 | } |
| 1114 | } |
| 1115 | |
| 1116 | nla_nest_end(skb, nest); |
| 1117 | return 0; |
| 1118 | |
| 1119 | nla_put_failure: |
| 1120 | nla_nest_cancel(skb, nest); |
| 1121 | return -1; |
| 1122 | } |
| 1123 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1124 | static int netem_dump(struct Qdisc *sch, struct sk_buff *skb) |
| 1125 | { |
| 1126 | const struct netem_sched_data *q = qdisc_priv(sch); |
stephen hemminger | 861d7f7 | 2011-02-23 13:04:17 +0000 | [diff] [blame] | 1127 | struct nlattr *nla = (struct nlattr *) skb_tail_pointer(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1128 | struct tc_netem_qopt qopt; |
| 1129 | struct tc_netem_corr cor; |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 1130 | struct tc_netem_reorder reorder; |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 1131 | struct tc_netem_corrupt corrupt; |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 1132 | struct tc_netem_rate rate; |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 1133 | struct tc_netem_slot slot; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | |
Dave Taht | 112f9cb | 2017-11-08 15:12:26 -0800 | [diff] [blame] | 1135 | qopt.latency = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->latency), |
| 1136 | UINT_MAX); |
| 1137 | qopt.jitter = min_t(psched_tdiff_t, PSCHED_NS2TICKS(q->jitter), |
| 1138 | UINT_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | qopt.limit = q->limit; |
| 1140 | qopt.loss = q->loss; |
| 1141 | qopt.gap = q->gap; |
| 1142 | qopt.duplicate = q->duplicate; |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1143 | if (nla_put(skb, TCA_OPTIONS, sizeof(qopt), &qopt)) |
| 1144 | goto nla_put_failure; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1145 | |
Dave Taht | 9980317 | 2017-11-08 15:12:27 -0800 | [diff] [blame] | 1146 | if (nla_put(skb, TCA_NETEM_LATENCY64, sizeof(q->latency), &q->latency)) |
| 1147 | goto nla_put_failure; |
| 1148 | |
| 1149 | if (nla_put(skb, TCA_NETEM_JITTER64, sizeof(q->jitter), &q->jitter)) |
| 1150 | goto nla_put_failure; |
| 1151 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1152 | cor.delay_corr = q->delay_cor.rho; |
| 1153 | cor.loss_corr = q->loss_cor.rho; |
| 1154 | cor.dup_corr = q->dup_cor.rho; |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1155 | if (nla_put(skb, TCA_NETEM_CORR, sizeof(cor), &cor)) |
| 1156 | goto nla_put_failure; |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 1157 | |
| 1158 | reorder.probability = q->reorder; |
| 1159 | reorder.correlation = q->reorder_cor.rho; |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1160 | if (nla_put(skb, TCA_NETEM_REORDER, sizeof(reorder), &reorder)) |
| 1161 | goto nla_put_failure; |
Stephen Hemminger | 0dca51d | 2005-05-26 12:55:48 -0700 | [diff] [blame] | 1162 | |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 1163 | corrupt.probability = q->corrupt; |
| 1164 | corrupt.correlation = q->corrupt_cor.rho; |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1165 | if (nla_put(skb, TCA_NETEM_CORRUPT, sizeof(corrupt), &corrupt)) |
| 1166 | goto nla_put_failure; |
Stephen Hemminger | c865e5d | 2005-12-21 19:03:44 -0800 | [diff] [blame] | 1167 | |
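	/* A rate that does not fit in the legacy 32-bit field is dumped in
	 * TCA_NETEM_RATE64, and the 32-bit field is saturated to ~0U.
	 */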
Yang Yingliang | 6a031f6 | 2013-12-25 17:35:15 +0800 | [diff] [blame] | 1168 | if (q->rate >= (1ULL << 32)) { |
Nicolas Dichtel | 2a51c1e | 2016-04-25 10:25:15 +0200 | [diff] [blame] | 1169 | if (nla_put_u64_64bit(skb, TCA_NETEM_RATE64, q->rate, |
| 1170 | TCA_NETEM_PAD)) |
Yang Yingliang | 6a031f6 | 2013-12-25 17:35:15 +0800 | [diff] [blame] | 1171 | goto nla_put_failure; |
| 1172 | rate.rate = ~0U; |
| 1173 | } else { |
| 1174 | rate.rate = q->rate; |
| 1175 | } |
Hagen Paul Pfeifer | 90b41a1 | 2011-12-12 14:30:00 +0000 | [diff] [blame] | 1176 | rate.packet_overhead = q->packet_overhead; |
| 1177 | rate.cell_size = q->cell_size; |
| 1178 | rate.cell_overhead = q->cell_overhead; |
David S. Miller | 1b34ec4 | 2012-03-29 05:11:39 -0400 | [diff] [blame] | 1179 | if (nla_put(skb, TCA_NETEM_RATE, sizeof(rate), &rate)) |
| 1180 | goto nla_put_failure; |
Hagen Paul Pfeifer | 7bc0f28 | 2011-11-30 12:20:26 +0000 | [diff] [blame] | 1181 | |
Eric Dumazet | e4ae004 | 2012-04-30 23:11:05 +0000 | [diff] [blame] | 1182 | if (q->ecn && nla_put_u32(skb, TCA_NETEM_ECN, q->ecn)) |
| 1183 | goto nla_put_failure; |
| 1184 | |
stephen hemminger | 661b797 | 2011-02-23 13:04:21 +0000 | [diff] [blame] | 1185 | if (dump_loss_model(q, skb) != 0) |
| 1186 | goto nla_put_failure; |
| 1187 | |
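	/* Dump the slot configuration only if some timing field is set;
	 * the bitwise OR is a branch-free "any field nonzero" test.
	 */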
Yousuk Seung | 0a9fe5c | 2018-06-27 10:32:19 -0700 | [diff] [blame] | 1188 | if (q->slot_config.min_delay | q->slot_config.max_delay | |
| 1189 | q->slot_config.dist_jitter) { |
Dave Taht | 836af83 | 2017-11-08 15:12:28 -0800 | [diff] [blame] | 1190 | slot = q->slot_config; |
| 1191 | if (slot.max_packets == INT_MAX) |
| 1192 | slot.max_packets = 0; |
| 1193 | if (slot.max_bytes == INT_MAX) |
| 1194 | slot.max_bytes = 0; |
| 1195 | if (nla_put(skb, TCA_NETEM_SLOT, sizeof(slot), &slot)) |
| 1196 | goto nla_put_failure; |
| 1197 | } |
| 1198 | |
stephen hemminger | 861d7f7 | 2011-02-23 13:04:17 +0000 | [diff] [blame] | 1199 | return nla_nest_end(skb, nla); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | |
Patrick McHardy | 1e90474 | 2008-01-22 22:11:17 -0800 | [diff] [blame] | 1201 | nla_put_failure: |
stephen hemminger | 861d7f7 | 2011-02-23 13:04:17 +0000 | [diff] [blame] | 1202 | nlmsg_trim(skb, nla); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1203 | return -1; |
| 1204 | } |
| 1205 | |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1206 | static int netem_dump_class(struct Qdisc *sch, unsigned long cl, |
| 1207 | struct sk_buff *skb, struct tcmsg *tcm) |
| 1208 | { |
| 1209 | struct netem_sched_data *q = qdisc_priv(sch); |
| 1210 | |
Eric Dumazet | 5061253 | 2011-12-28 23:12:02 +0000 | [diff] [blame] | 1211 | if (cl != 1 || !q->qdisc) /* only one class */ |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1212 | return -ENOENT; |
| 1213 | |
| 1214 | tcm->tcm_handle |= TC_H_MIN(1); |
| 1215 | tcm->tcm_info = q->qdisc->handle; |
| 1216 | |
| 1217 | return 0; |
| 1218 | } |
| 1219 | |
| 1220 | static int netem_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, |
Alexander Aring | 653d6fd | 2017-12-20 12:35:17 -0500 | [diff] [blame] | 1221 | struct Qdisc **old, struct netlink_ext_ack *extack) |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1222 | { |
| 1223 | struct netem_sched_data *q = qdisc_priv(sch); |
| 1224 | |
WANG Cong | 86a7996 | 2016-02-25 14:55:00 -0800 | [diff] [blame] | 1225 | *old = qdisc_replace(sch, new, &q->qdisc); |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1226 | return 0; |
| 1227 | } |
| 1228 | |
| 1229 | static struct Qdisc *netem_leaf(struct Qdisc *sch, unsigned long arg) |
| 1230 | { |
| 1231 | struct netem_sched_data *q = qdisc_priv(sch); |
| 1232 | return q->qdisc; |
| 1233 | } |
| 1234 | |
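/* netem exposes a single pseudo-class (minor 1) holding the inner qdisc,
 * so every lookup resolves to it.
 */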
WANG Cong | 143976c | 2017-08-24 16:51:29 -0700 | [diff] [blame] | 1235 | static unsigned long netem_find(struct Qdisc *sch, u32 classid) |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1236 | { |
| 1237 | return 1; |
| 1238 | } |
| 1239 | |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1240 | static void netem_walk(struct Qdisc *sch, struct qdisc_walker *walker) |
| 1241 | { |
| 1242 | if (!walker->stop) { |
| 1243 | if (walker->count >= walker->skip) |
| 1244 | if (walker->fn(sch, 1, walker) < 0) { |
| 1245 | walker->stop = 1; |
| 1246 | return; |
| 1247 | } |
| 1248 | walker->count++; |
| 1249 | } |
| 1250 | } |
| 1251 | |
| 1252 | static const struct Qdisc_class_ops netem_class_ops = { |
| 1253 | .graft = netem_graft, |
| 1254 | .leaf = netem_leaf, |
WANG Cong | 143976c | 2017-08-24 16:51:29 -0700 | [diff] [blame] | 1255 | .find = netem_find, |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1256 | .walk = netem_walk, |
| 1257 | .dump = netem_dump_class, |
| 1258 | }; |
| 1259 | |
Eric Dumazet | 20fea08 | 2007-11-14 01:44:41 -0800 | [diff] [blame] | 1260 | static struct Qdisc_ops netem_qdisc_ops __read_mostly = { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1261 | .id = "netem", |
stephen hemminger | 10f6dfc | 2011-02-23 13:04:20 +0000 | [diff] [blame] | 1262 | .cl_ops = &netem_class_ops, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1263 | .priv_size = sizeof(struct netem_sched_data), |
| 1264 | .enqueue = netem_enqueue, |
| 1265 | .dequeue = netem_dequeue, |
Jarek Poplawski | 77be155 | 2008-10-31 00:47:01 -0700 | [diff] [blame] | 1266 | .peek = qdisc_peek_dequeued, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1267 | .init = netem_init, |
| 1268 | .reset = netem_reset, |
| 1269 | .destroy = netem_destroy, |
| 1270 | .change = netem_change, |
| 1271 | .dump = netem_dump, |
| 1272 | .owner = THIS_MODULE, |
| 1273 | }; |
| 1274 | |
| 1275 | |
| 1276 | static int __init netem_module_init(void) |
| 1277 | { |
Stephen Hemminger | eb229c4 | 2005-11-03 13:49:01 -0800 | [diff] [blame] | 1278 | pr_info("netem: version " VERSION "\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | return register_qdisc(&netem_qdisc_ops); |
| 1280 | } |
| 1281 | static void __exit netem_module_exit(void) |
| 1282 | { |
| 1283 | unregister_qdisc(&netem_qdisc_ops); |
| 1284 | } |
| 1285 | module_init(netem_module_init) |
| 1286 | module_exit(netem_module_exit) |
| 1287 | MODULE_LICENSE("GPL"); |