// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sched/sch_sfb.c    Stochastic Fair Blue
 *
 * Copyright (c) 2008-2011 Juliusz Chroboczek <jch@pps.jussieu.fr>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * W. Feng, D. Kandlur, D. Saha, K. Shin. Blue:
 * A New Class of Active Queue Management Algorithms.
 * U. Michigan CSE-TR-387-99, April 1999.
 *
 * http://www.thefengs.com/wuchang/blue/CSE-TR-387-99.pdf
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/random.h>
#include <linux/siphash.h>
#include <net/ip.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>

/*
 * SFB uses two B[l][n] : L x N arrays of bins (L levels, N bins per level)
 * This implementation uses L = 8 and N = 16
 * This permits us to split one 32bit hash (provided per packet by rxhash or
 * external classifier) into 8 subhashes of 4 bits.
 */
#define SFB_BUCKET_SHIFT 4
#define SFB_NUMBUCKETS   (1 << SFB_BUCKET_SHIFT) /* N bins per Level */
#define SFB_BUCKET_MASK  (SFB_NUMBUCKETS - 1)
#define SFB_LEVELS       (32 / SFB_BUCKET_SHIFT) /* L */
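
/* Example: a 32-bit hash of 0xA3F1C2D4 (an arbitrary illustration), consumed
 * 4 bits at a time starting from the least-significant nibble, selects one
 * bin per level:
 * B[0][0x4], B[1][0xD], B[2][0x2], B[3][0xC],
 * B[4][0x1], B[5][0xF], B[6][0x3], B[7][0xA].
 */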

/* SFB algo uses a virtual queue, named "bin" */
struct sfb_bucket {
        u16             qlen;   /* length of virtual queue */
        u16             p_mark; /* marking probability */
};

/* We use double buffering right before a hash change
 * (Section 4.4 of the SFB reference: moving hash functions)
 */
struct sfb_bins {
        siphash_key_t     perturbation; /* siphash key */
        struct sfb_bucket bins[SFB_LEVELS][SFB_NUMBUCKETS];
};
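
/* Slot usage: q->slot selects the active sfb_bins.  During the final
 * warmup_time of each rehash_interval, double_buffering is set and packets
 * whose flow already looks inelastic (p_min saturated) are additionally
 * hashed, under the standby slot's own siphash key, into the standby bins,
 * so the identity of misbehaving flows survives the hash change.  At rehash
 * time, sfb_swap_slot() gives the retiring slot a fresh key and flips
 * q->slot.
 */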

struct sfb_sched_data {
        struct Qdisc    *qdisc;
        struct tcf_proto __rcu *filter_list;
        struct tcf_block *block;
        unsigned long   rehash_interval;
        unsigned long   warmup_time;    /* double buffering warmup time in jiffies */
        u32             max;
        u32             bin_size;       /* maximum queue length per bin */
        u32             increment;      /* d1 */
        u32             decrement;      /* d2 */
        u32             limit;          /* HARD maximal queue length */
        u32             penalty_rate;
        u32             penalty_burst;
        u32             tokens_avail;
        unsigned long   rehash_time;
        unsigned long   token_time;

        u8              slot;           /* current active bins (0 or 1) */
        bool            double_buffering;
        struct sfb_bins bins[2];

        struct {
                u32     earlydrop;
                u32     penaltydrop;
                u32     bucketdrop;
                u32     queuedrop;
                u32     childdrop;      /* drops in child qdisc */
                u32     marked;         /* ECN mark */
        } stats;
};

/*
 * Each queued skb might be hashed into one or two bins
 * We store the two hash values in skb_cb.
 * (A zero value means double buffering was not used)
 */
struct sfb_skb_cb {
        u32 hashes[2];
};

static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb)
{
        qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb));
        return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data;
}

/*
 * If using 'internal' SFB flow classifier, hash comes from skb rxhash
 * If using external classifier, hash comes from the classid.
 */
static u32 sfb_hash(const struct sk_buff *skb, u32 slot)
{
        return sfb_skb_cb(skb)->hashes[slot];
}
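
/* Either way, the raw value is mixed with the active slot's siphash key in
 * sfb_enqueue(), so the flow-to-bins mapping is unpredictable to remote
 * senders and changes whenever a slot is re-keyed.
 */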

/* Probabilities are coded as Q0.16 fixed-point values,
 * with 0xFFFF representing 65535/65536 (almost 1.0)
 * Addition and subtraction are saturating in [0, 65535]
 */
static u32 prob_plus(u32 p1, u32 p2)
{
        u32 res = p1 + p2;

        return min_t(u32, res, SFB_MAX_PROB);
}

static u32 prob_minus(u32 p1, u32 p2)
{
        return p1 > p2 ? p1 - p2 : 0;
}
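
/* SFB_MAX_PROB is 0xFFFF (defined with the rest of the SFB uapi in
 * include/uapi/linux/pkt_sched.h).  For example, prob_plus(0xFF00, 0x0200)
 * saturates to 0xFFFF rather than wrapping, and prob_minus(0x0100, 0x0200)
 * clamps to 0 rather than underflowing.
 */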

static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q)
{
        int i;
        struct sfb_bucket *b = &q->bins[slot].bins[0][0];

        for (i = 0; i < SFB_LEVELS; i++) {
                u32 hash = sfbhash & SFB_BUCKET_MASK;

                sfbhash >>= SFB_BUCKET_SHIFT;
                if (b[hash].qlen < 0xFFFF)
                        b[hash].qlen++;
                b += SFB_NUMBUCKETS; /* next level */
        }
}

static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
        u32 sfbhash;

        sfbhash = sfb_hash(skb, 0);
        if (sfbhash)
                increment_one_qlen(sfbhash, 0, q);

        sfbhash = sfb_hash(skb, 1);
        if (sfbhash)
                increment_one_qlen(sfbhash, 1, q);
}

static void decrement_one_qlen(u32 sfbhash, u32 slot,
                               struct sfb_sched_data *q)
{
        int i;
        struct sfb_bucket *b = &q->bins[slot].bins[0][0];

        for (i = 0; i < SFB_LEVELS; i++) {
                u32 hash = sfbhash & SFB_BUCKET_MASK;

                sfbhash >>= SFB_BUCKET_SHIFT;
                if (b[hash].qlen > 0)
                        b[hash].qlen--;
                b += SFB_NUMBUCKETS; /* next level */
        }
}

static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q)
{
        u32 sfbhash;

        sfbhash = sfb_hash(skb, 0);
        if (sfbhash)
                decrement_one_qlen(sfbhash, 0, q);

        sfbhash = sfb_hash(skb, 1);
        if (sfbhash)
                decrement_one_qlen(sfbhash, 1, q);
}
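
/* The bin qlen counters shadow the real queue: increment_qlen() runs on
 * successful enqueue and decrement_qlen() on dequeue, replaying the hashes
 * saved in sfb_skb_cb.  Reusing the stored hashes (rather than rehashing)
 * keeps the accounting consistent even if the slots were swapped while the
 * packet sat in the child qdisc.
 */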

static void decrement_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
        b->p_mark = prob_minus(b->p_mark, q->decrement);
}

static void increment_prob(struct sfb_bucket *b, struct sfb_sched_data *q)
{
        b->p_mark = prob_plus(b->p_mark, q->increment);
}

static void sfb_zero_all_buckets(struct sfb_sched_data *q)
{
        memset(&q->bins, 0, sizeof(q->bins));
}

/*
 * compute max qlen, max p_mark, and avg p_mark
 */
static u32 sfb_compute_qlen(u32 *prob_r, u32 *avgpm_r, const struct sfb_sched_data *q)
{
        int i;
        u32 qlen = 0, prob = 0, totalpm = 0;
        const struct sfb_bucket *b = &q->bins[q->slot].bins[0][0];

        for (i = 0; i < SFB_LEVELS * SFB_NUMBUCKETS; i++) {
                if (qlen < b->qlen)
                        qlen = b->qlen;
                totalpm += b->p_mark;
                if (prob < b->p_mark)
                        prob = b->p_mark;
                b++;
        }
        *prob_r = prob;
        *avgpm_r = totalpm / (SFB_LEVELS * SFB_NUMBUCKETS);
        return qlen;
}
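
/* These aggregates (worst-case bin backlog plus worst and average marking
 * probability over the active slot) are reported to user space as
 * tc_sfb_xstats by sfb_dump_stats(), i.e. what "tc -s qdisc" displays.
 */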

static void sfb_init_perturbation(u32 slot, struct sfb_sched_data *q)
{
        get_random_bytes(&q->bins[slot].perturbation,
                         sizeof(q->bins[slot].perturbation));
}

static void sfb_swap_slot(struct sfb_sched_data *q)
{
        sfb_init_perturbation(q->slot, q);
        q->slot ^= 1;
        q->double_buffering = false;
}

/* Non-elastic flows are allowed to use part of the bandwidth, expressed
 * in "penalty_rate" packets per second, with "penalty_burst" burst
 */
static bool sfb_rate_limit(struct sk_buff *skb, struct sfb_sched_data *q)
{
        if (q->penalty_rate == 0 || q->penalty_burst == 0)
                return true;

        if (q->tokens_avail < 1) {
                unsigned long age = min(10UL * HZ, jiffies - q->token_time);

                q->tokens_avail = (age * q->penalty_rate) / HZ;
                if (q->tokens_avail > q->penalty_burst)
                        q->tokens_avail = q->penalty_burst;
                q->token_time = jiffies;
                if (q->tokens_avail < 1)
                        return true;
        }

        q->tokens_avail--;
        return false;
}
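
/* Token-bucket example: with penalty_rate = 10 and penalty_burst = 20, a
 * flow whose bucket emptied 2 seconds ago (age = 2 * HZ) refills to
 * min(2 * 10, 20) = 20 tokens; each forwarded packet then consumes one
 * token, and sfb_rate_limit() returns true (drop) once the bucket is empty
 * again.
 */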

static bool sfb_classify(struct sk_buff *skb, struct tcf_proto *fl,
                         int *qerr, u32 *salt)
{
        struct tcf_result res;
        int result;

        result = tcf_classify(skb, NULL, fl, &res, false);
        if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_STOLEN:
                case TC_ACT_QUEUED:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        fallthrough;
                case TC_ACT_SHOT:
                        return false;
                }
#endif
                *salt = TC_H_MIN(res.classid);
                return true;
        }
        return false;
}
278
Petr Machataac5c66f2020-07-14 20:03:08 +0300279static int sfb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
Eric Dumazet520ac302016-06-21 23:16:49 -0700280 struct sk_buff **to_free)
Eric Dumazete13e02a2011-02-23 10:56:17 +0000281{
282
283 struct sfb_sched_data *q = qdisc_priv(sch);
284 struct Qdisc *child = q->qdisc;
John Fastabend25d8c0d2014-09-12 20:05:27 -0700285 struct tcf_proto *fl;
Eric Dumazete13e02a2011-02-23 10:56:17 +0000286 int i;
287 u32 p_min = ~0;
288 u32 minqlen = ~0;
Tom Herbert63c0ad42015-05-01 11:30:15 -0700289 u32 r, sfbhash;
290 u32 slot = q->slot;
Eric Dumazete13e02a2011-02-23 10:56:17 +0000291 int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
292
Eric Dumazet363437f2011-08-25 06:21:32 +0000293 if (unlikely(sch->q.qlen >= q->limit)) {
John Fastabend25331d62014-09-28 11:53:29 -0700294 qdisc_qstats_overlimit(sch);
Eric Dumazet363437f2011-08-25 06:21:32 +0000295 q->stats.queuedrop++;
296 goto drop;
297 }
298
Eric Dumazete13e02a2011-02-23 10:56:17 +0000299 if (q->rehash_interval > 0) {
300 unsigned long limit = q->rehash_time + q->rehash_interval;
301
302 if (unlikely(time_after(jiffies, limit))) {
303 sfb_swap_slot(q);
304 q->rehash_time = jiffies;
305 } else if (unlikely(!q->double_buffering && q->warmup_time > 0 &&
306 time_after(jiffies, limit - q->warmup_time))) {
307 q->double_buffering = true;
308 }
309 }
310
John Fastabend25d8c0d2014-09-12 20:05:27 -0700311 fl = rcu_dereference_bh(q->filter_list);
312 if (fl) {
Tom Herbert63c0ad42015-05-01 11:30:15 -0700313 u32 salt;
314
Eric Dumazete13e02a2011-02-23 10:56:17 +0000315 /* If using external classifiers, get result and record it. */
John Fastabend25d8c0d2014-09-12 20:05:27 -0700316 if (!sfb_classify(skb, fl, &ret, &salt))
Eric Dumazete13e02a2011-02-23 10:56:17 +0000317 goto other_drop;
Eric Dumazet55667442019-10-22 07:57:46 -0700318 sfbhash = siphash_1u32(salt, &q->bins[slot].perturbation);
Eric Dumazete13e02a2011-02-23 10:56:17 +0000319 } else {
Eric Dumazet55667442019-10-22 07:57:46 -0700320 sfbhash = skb_get_hash_perturb(skb, &q->bins[slot].perturbation);
Eric Dumazete13e02a2011-02-23 10:56:17 +0000321 }
322
Eric Dumazete13e02a2011-02-23 10:56:17 +0000323
Eric Dumazete13e02a2011-02-23 10:56:17 +0000324 if (!sfbhash)
325 sfbhash = 1;
326 sfb_skb_cb(skb)->hashes[slot] = sfbhash;
327
328 for (i = 0; i < SFB_LEVELS; i++) {
329 u32 hash = sfbhash & SFB_BUCKET_MASK;
330 struct sfb_bucket *b = &q->bins[slot].bins[i][hash];
331
332 sfbhash >>= SFB_BUCKET_SHIFT;
333 if (b->qlen == 0)
334 decrement_prob(b, q);
335 else if (b->qlen >= q->bin_size)
336 increment_prob(b, q);
337 if (minqlen > b->qlen)
338 minqlen = b->qlen;
339 if (p_min > b->p_mark)
340 p_min = b->p_mark;
341 }
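
        /* A single bin may be shared by many flows, so one bin can overstate
         * a flow's behaviour.  As in a Bloom filter, taking the minimum qlen
         * and p_mark across all SFB_LEVELS bins judges each flow by its
         * least-shared bin, which is unlikely to collide with a misbehaving
         * flow at every level.
         */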

        slot ^= 1;
        sfb_skb_cb(skb)->hashes[slot] = 0;

        if (unlikely(minqlen >= q->max)) {
                qdisc_qstats_overlimit(sch);
                q->stats.bucketdrop++;
                goto drop;
        }

        if (unlikely(p_min >= SFB_MAX_PROB)) {
                /* Inelastic flow */
                if (q->double_buffering) {
                        sfbhash = skb_get_hash_perturb(skb,
                                                       &q->bins[slot].perturbation);
                        if (!sfbhash)
                                sfbhash = 1;
                        sfb_skb_cb(skb)->hashes[slot] = sfbhash;

                        for (i = 0; i < SFB_LEVELS; i++) {
                                u32 hash = sfbhash & SFB_BUCKET_MASK;
                                struct sfb_bucket *b = &q->bins[slot].bins[i][hash];

                                sfbhash >>= SFB_BUCKET_SHIFT;
                                if (b->qlen == 0)
                                        decrement_prob(b, q);
                                else if (b->qlen >= q->bin_size)
                                        increment_prob(b, q);
                        }
                }
                if (sfb_rate_limit(skb, q)) {
                        qdisc_qstats_overlimit(sch);
                        q->stats.penaltydrop++;
                        goto drop;
                }
                goto enqueue;
        }

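        /* Bernoulli trial: r is uniform over [0, SFB_MAX_PROB], so the test
         * below marks (or drops) the packet with probability p_min / 65536.
         */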
        r = prandom_u32() & SFB_MAX_PROB;

        if (unlikely(r < p_min)) {
                if (unlikely(p_min > SFB_MAX_PROB / 2)) {
                        /* If we're marking that many packets, then either
                         * this flow is unresponsive, or we're badly congested.
                         * In either case, we want to start dropping packets.
                         */
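                        /* Example: with p_min = 0xC000 (~0.75) the drop test
                         * below fires for r < 0x8000, i.e. ~50 % of packets,
                         * and the remaining ~25 % in [0x8000, 0xC000) are
                         * ECN-marked instead.
                         */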
                        if (r < (p_min - SFB_MAX_PROB / 2) * 2) {
                                q->stats.earlydrop++;
                                goto drop;
                        }
                }
                if (INET_ECN_set_ce(skb)) {
                        q->stats.marked++;
                } else {
                        q->stats.earlydrop++;
                        goto drop;
                }
        }

enqueue:
        ret = qdisc_enqueue(skb, child, to_free);
        if (likely(ret == NET_XMIT_SUCCESS)) {
                qdisc_qstats_backlog_inc(sch, skb);
                sch->q.qlen++;
                increment_qlen(skb, q);
        } else if (net_xmit_drop_count(ret)) {
                q->stats.childdrop++;
                qdisc_qstats_drop(sch);
        }
        return ret;

drop:
        qdisc_drop(skb, sch, to_free);
        return NET_XMIT_CN;
other_drop:
        if (ret & __NET_XMIT_BYPASS)
                qdisc_qstats_drop(sch);
        kfree_skb(skb);
        return ret;
}

static struct sk_buff *sfb_dequeue(struct Qdisc *sch)
{
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;
        struct sk_buff *skb;

        skb = child->dequeue(q->qdisc);

        if (skb) {
                qdisc_bstats_update(sch, skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                decrement_qlen(skb, q);
        }

        return skb;
}

static struct sk_buff *sfb_peek(struct Qdisc *sch)
{
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child = q->qdisc;

        return child->ops->peek(child);
}

/* No sfb_drop -- impossible since the child doesn't return the dropped skb. */

static void sfb_reset(struct Qdisc *sch)
{
        struct sfb_sched_data *q = qdisc_priv(sch);

        qdisc_reset(q->qdisc);
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
        q->slot = 0;
        q->double_buffering = false;
        sfb_zero_all_buckets(q);
        sfb_init_perturbation(0, q);
}

static void sfb_destroy(struct Qdisc *sch)
{
        struct sfb_sched_data *q = qdisc_priv(sch);

        tcf_block_put(q->block);
        qdisc_put(q->qdisc);
}

static const struct nla_policy sfb_policy[TCA_SFB_MAX + 1] = {
        [TCA_SFB_PARMS] = { .len = sizeof(struct tc_sfb_qopt) },
};

static const struct tc_sfb_qopt sfb_default_ops = {
        .rehash_interval = 600 * MSEC_PER_SEC,
        .warmup_time = 60 * MSEC_PER_SEC,
        .limit = 0,
        .max = 25,
        .bin_size = 20,
        .increment = (SFB_MAX_PROB + 500) / 1000, /* 0.1 % */
        .decrement = (SFB_MAX_PROB + 3000) / 6000,
        .penalty_rate = 10,
        .penalty_burst = 20,
};
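
/* In integer terms the defaults give increment = (0xFFFF + 500) / 1000 = 66
 * (~0.1 %) and decrement = (0xFFFF + 3000) / 6000 = 11 (~0.017 %) in Q0.16
 * units.  A roughly equivalent user-space setup (option names as documented
 * in tc-sfb(8); exact spelling and float rounding may vary across iproute2
 * versions):
 *
 *   tc qdisc add dev eth0 root sfb rehash 600000 db 60000 max 25 target 20 \
 *      increment 0.001 decrement 0.00017 penalty_rate 10 penalty_burst 20
 */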

static int sfb_change(struct Qdisc *sch, struct nlattr *opt,
                      struct netlink_ext_ack *extack)
{
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct Qdisc *child, *old;
        struct nlattr *tb[TCA_SFB_MAX + 1];
        const struct tc_sfb_qopt *ctl = &sfb_default_ops;
        u32 limit;
        int err;

        if (opt) {
                err = nla_parse_nested_deprecated(tb, TCA_SFB_MAX, opt,
                                                  sfb_policy, NULL);
                if (err < 0)
                        return -EINVAL;

                if (tb[TCA_SFB_PARMS] == NULL)
                        return -EINVAL;

                ctl = nla_data(tb[TCA_SFB_PARMS]);
        }

        limit = ctl->limit;
        if (limit == 0)
                limit = qdisc_dev(sch)->tx_queue_len;

        child = fifo_create_dflt(sch, &pfifo_qdisc_ops, limit, extack);
        if (IS_ERR(child))
                return PTR_ERR(child);

        if (child != &noop_qdisc)
                qdisc_hash_add(child, true);
        sch_tree_lock(sch);

        qdisc_purge_queue(q->qdisc);
        old = q->qdisc;
        q->qdisc = child;

        q->rehash_interval = msecs_to_jiffies(ctl->rehash_interval);
        q->warmup_time = msecs_to_jiffies(ctl->warmup_time);
        q->rehash_time = jiffies;
        q->limit = limit;
        q->increment = ctl->increment;
        q->decrement = ctl->decrement;
        q->max = ctl->max;
        q->bin_size = ctl->bin_size;
        q->penalty_rate = ctl->penalty_rate;
        q->penalty_burst = ctl->penalty_burst;
        q->tokens_avail = ctl->penalty_burst;
        q->token_time = jiffies;

        q->slot = 0;
        q->double_buffering = false;
        sfb_zero_all_buckets(q);
        sfb_init_perturbation(0, q);
        sfb_init_perturbation(1, q);

        sch_tree_unlock(sch);
        qdisc_put(old);

        return 0;
}

static int sfb_init(struct Qdisc *sch, struct nlattr *opt,
                    struct netlink_ext_ack *extack)
{
        struct sfb_sched_data *q = qdisc_priv(sch);
        int err;

        err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
        if (err)
                return err;

        q->qdisc = &noop_qdisc;
        return sfb_change(sch, opt, extack);
}

static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct nlattr *opts;
        struct tc_sfb_qopt opt = {
                .rehash_interval = jiffies_to_msecs(q->rehash_interval),
                .warmup_time = jiffies_to_msecs(q->warmup_time),
                .limit = q->limit,
                .max = q->max,
                .bin_size = q->bin_size,
                .increment = q->increment,
                .decrement = q->decrement,
                .penalty_rate = q->penalty_rate,
                .penalty_burst = q->penalty_burst,
        };

        sch->qstats.backlog = q->qdisc->qstats.backlog;
        opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
        if (opts == NULL)
                goto nla_put_failure;
        if (nla_put(skb, TCA_SFB_PARMS, sizeof(opt), &opt))
                goto nla_put_failure;
        return nla_nest_end(skb, opts);

nla_put_failure:
        nla_nest_cancel(skb, opts);
        return -EMSGSIZE;
}

static int sfb_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
        struct sfb_sched_data *q = qdisc_priv(sch);
        struct tc_sfb_xstats st = {
                .earlydrop = q->stats.earlydrop,
                .penaltydrop = q->stats.penaltydrop,
                .bucketdrop = q->stats.bucketdrop,
                .queuedrop = q->stats.queuedrop,
                .childdrop = q->stats.childdrop,
                .marked = q->stats.marked,
        };

        st.maxqlen = sfb_compute_qlen(&st.maxprob, &st.avgprob, q);

        return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int sfb_dump_class(struct Qdisc *sch, unsigned long cl,
                          struct sk_buff *skb, struct tcmsg *tcm)
{
        return -ENOSYS;
}

static int sfb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
                     struct Qdisc **old, struct netlink_ext_ack *extack)
{
        struct sfb_sched_data *q = qdisc_priv(sch);

        if (new == NULL)
                new = &noop_qdisc;

        *old = qdisc_replace(sch, new, &q->qdisc);
        return 0;
}

static struct Qdisc *sfb_leaf(struct Qdisc *sch, unsigned long arg)
{
        struct sfb_sched_data *q = qdisc_priv(sch);

        return q->qdisc;
}

static unsigned long sfb_find(struct Qdisc *sch, u32 classid)
{
        return 1;
}

static void sfb_unbind(struct Qdisc *sch, unsigned long arg)
{
}

static int sfb_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
                            struct nlattr **tca, unsigned long *arg,
                            struct netlink_ext_ack *extack)
{
        return -ENOSYS;
}

static int sfb_delete(struct Qdisc *sch, unsigned long cl,
                      struct netlink_ext_ack *extack)
{
        return -ENOSYS;
}

static void sfb_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
        if (!walker->stop) {
                if (walker->count >= walker->skip)
                        if (walker->fn(sch, 1, walker) < 0) {
                                walker->stop = 1;
                                return;
                        }
                walker->count++;
        }
}

static struct tcf_block *sfb_tcf_block(struct Qdisc *sch, unsigned long cl,
                                       struct netlink_ext_ack *extack)
{
        struct sfb_sched_data *q = qdisc_priv(sch);

        if (cl)
                return NULL;
        return q->block;
}

static unsigned long sfb_bind(struct Qdisc *sch, unsigned long parent,
                              u32 classid)
{
        return 0;
}

static const struct Qdisc_class_ops sfb_class_ops = {
        .graft          = sfb_graft,
        .leaf           = sfb_leaf,
        .find           = sfb_find,
        .change         = sfb_change_class,
        .delete         = sfb_delete,
        .walk           = sfb_walk,
        .tcf_block      = sfb_tcf_block,
        .bind_tcf       = sfb_bind,
        .unbind_tcf     = sfb_unbind,
        .dump           = sfb_dump_class,
};

static struct Qdisc_ops sfb_qdisc_ops __read_mostly = {
        .id             = "sfb",
        .priv_size      = sizeof(struct sfb_sched_data),
        .cl_ops         = &sfb_class_ops,
        .enqueue        = sfb_enqueue,
        .dequeue        = sfb_dequeue,
        .peek           = sfb_peek,
        .init           = sfb_init,
        .reset          = sfb_reset,
        .destroy        = sfb_destroy,
        .change         = sfb_change,
        .dump           = sfb_dump,
        .dump_stats     = sfb_dump_stats,
        .owner          = THIS_MODULE,
};

static int __init sfb_module_init(void)
{
        return register_qdisc(&sfb_qdisc_ops);
}

static void __exit sfb_module_exit(void)
{
        unregister_qdisc(&sfb_qdisc_ops);
}

module_init(sfb_module_init)
module_exit(sfb_module_exit)

MODULE_DESCRIPTION("Stochastic Fair Blue queue discipline");
MODULE_AUTHOR("Juliusz Chroboczek");
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");