/*
 * net/sched/sch_cbq.c	Class-Based Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>


/* Class-Based Queueing (CBQ) algorithm.
   =======================================

   Sources: [1] Sally Floyd and Van Jacobson, "Link-sharing and Resource
	    Management Models for Packet Networks",
	    IEEE/ACM Transactions on Networking, Vol.3, No.4, 1995

	    [2] Sally Floyd, "Notes on CBQ and Guaranteed Service", 1995

	    [3] Sally Floyd, "Notes on Class-Based Queueing: Setting
	    Parameters", 1996

	    [4] Sally Floyd and Michael Speer, "Experimental Results
	    for Class-Based Queueing", 1998, not published.

   -----------------------------------------------------------------------

   The algorithm skeleton was taken from the NS simulator's cbq.cc.
   Anyone checking this code against the LBL version should keep in
   mind that ONLY the skeleton was borrowed; the implementation is
   different. In particular:

   --- The WRR algorithm is different. Our version looks more
   reasonable (I hope) and works when quanta are allowed to be
   less than MTU, which is always the case when real-time classes
   have small rates. Note that the statement in [3] is incomplete:
   delay may actually be estimated even when the per-round class
   allotment is less than MTU. Namely, if the per-round allotment
   is W*r_i, and r_1+...+r_k = r < 1, then

   delay_i <= ([MTU/(W*r_i)]*W*r + W*r + k*MTU)/B

   In the worst case we have an IntServ estimate with D = W*r+k*MTU
   and C = MTU*r. The proof (if correct at all) is trivial.


   --- It seems that cbq-2.0 is not very accurate. At least, I cannot
   interpret some places, which look like wrong translations
   from NS. Anyone is advised to find these differences
   and explain to me why I am wrong 8).

   --- Linux has no EOI event, so we cannot estimate true class
   idle time. The workaround is to treat the next dequeue event
   as a sign that the previous packet has finished. This is wrong
   because of internal device queueing, but on a permanently loaded
   link it holds. Moreover, combined with the clock integrator, this
   scheme looks very close to an ideal solution. */
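
/* Purely illustrative arithmetic for the delay bound above (all numbers
 * are hypothetical, not taken from the code): suppose B = 1.25e6 bytes/s
 * (a 10 Mbit link), MTU = 1500 bytes, k = 2 classes with r_i = 0.25 each
 * (so r = 0.5), and W chosen so that the per-round allotment W*r_i = 500
 * bytes, i.e. below MTU. Then W*r = 1000 and
 *
 *	delay_i <= (ceil(1500/500)*1000 + 1000 + 2*1500)/1.25e6
 *		 = (3000 + 1000 + 3000)/1.25e6 ~= 5.6 ms
 *
 * which shows the bound stays finite even with sub-MTU allotments.
 */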

struct cbq_sched_data;


struct cbq_class {
	struct Qdisc_class_common common;
	struct cbq_class	*next_alive;	/* next class with backlog in this priority band */

/* Parameters */
	unsigned char		priority;	/* class priority */
	unsigned char		priority2;	/* priority to be used after overlimit */
	unsigned char		ewma_log;	/* time constant for idle time calculation */

	u32			defmap;

	/* Link-sharing scheduler parameters */
	long			maxidle;	/* Class parameters: see below. */
	long			offtime;
	long			minidle;
	u32			avpkt;
	struct qdisc_rate_table	*R_tab;

	/* General scheduler (WRR) parameters */
	long			allot;
	long			quantum;	/* Allotment per WRR round */
	long			weight;		/* Relative allotment: see below */

	struct Qdisc		*qdisc;		/* Ptr to CBQ discipline */
	struct cbq_class	*split;		/* Ptr to split node */
	struct cbq_class	*share;		/* Ptr to LS parent in the class tree */
	struct cbq_class	*tparent;	/* Ptr to tree parent in the class tree */
	struct cbq_class	*borrow;	/* NULL if class is bandwidth limited;
						   parent otherwise */
	struct cbq_class	*sibling;	/* Sibling chain */
	struct cbq_class	*children;	/* Pointer to children chain */

	struct Qdisc		*q;		/* Elementary queueing discipline */


/* Variables */
	unsigned char		cpriority;	/* Effective priority */
	unsigned char		delayed;
	unsigned char		level;		/* level of the class in hierarchy:
						   0 for leaf classes, and maximal
						   level of children + 1 for nodes.
						 */

	psched_time_t		last;		/* Last end of service */
	psched_time_t		undertime;
	long			avgidle;
	long			deficit;	/* Saved deficit for WRR */
	psched_time_t		penalized;
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_queue qstats;
	struct gnet_stats_rate_est64 rate_est;
	struct tc_cbq_xstats	xstats;

	struct tcf_proto __rcu	*filter_list;

	int			refcnt;
	int			filters;

	struct cbq_class	*defaults[TC_PRIO_MAX + 1];
};

struct cbq_sched_data {
	struct Qdisc_class_hash	clhash;			/* Hash table of all classes */
	int			nclasses[TC_CBQ_MAXPRIO + 1];
	unsigned int		quanta[TC_CBQ_MAXPRIO + 1];

	struct cbq_class	link;

	unsigned int		activemask;
	struct cbq_class	*active[TC_CBQ_MAXPRIO + 1];	/* List of all classes
								   with backlog */

#ifdef CONFIG_NET_CLS_ACT
	struct cbq_class	*rx_class;
#endif
	struct cbq_class	*tx_class;
	struct cbq_class	*tx_borrowed;
	int			tx_len;
	psched_time_t		now;		/* Cached timestamp */
	unsigned int		pmask;

	struct hrtimer		delay_timer;
	struct qdisc_watchdog	watchdog;	/* Watchdog timer,
						   started when CBQ has
						   backlog, but cannot
						   transmit just now */
	psched_tdiff_t		wd_expires;
	int			toplevel;
	u32			hgenerator;
};


#define L2T(cl, len)	qdisc_l2t((cl)->R_tab, len)

static inline struct cbq_class *
cbq_class_lookup(struct cbq_sched_data *q, u32 classid)
{
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, classid);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct cbq_class, common);
}

#ifdef CONFIG_NET_CLS_ACT

static struct cbq_class *
cbq_reclassify(struct sk_buff *skb, struct cbq_class *this)
{
	struct cbq_class *cl;

	for (cl = this->tparent; cl; cl = cl->tparent) {
		struct cbq_class *new = cl->defaults[TC_PRIO_BESTEFFORT];

		if (new != NULL && new != this)
			return new;
	}
	return NULL;
}

#endif

/* Classify packet. The procedure is pretty complicated, but
 * it allows us to combine link sharing and priority scheduling
 * transparently.
 *
 * Namely, you can put link-sharing rules (e.g. route based) at the
 * root of CBQ, so that it resolves to split nodes. Then packets are
 * classified by logical priority, or a more specific classifier may
 * be attached to the split node.
 */
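
/* For illustration only (the handle/priority values below are
 * hypothetical): if this qdisc's handle is 1: (sch->handle ==
 * 0x00010000) and a socket sets skb->priority = 0x00010002, then
 * TC_H_MAJ(prio ^ sch->handle) is 0 and Step 1 below resolves the
 * packet directly to class 1:2 via cbq_class_lookup(), skipping
 * the filter chain entirely.
 */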

static struct cbq_class *
cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *head = &q->link;
	struct cbq_class **defmap;
	struct cbq_class *cl = NULL;
	u32 prio = skb->priority;
	struct tcf_proto *fl;
	struct tcf_result res;

	/*
	 * Step 1. If skb->priority points to one of our classes, use it.
	 */
	if (TC_H_MAJ(prio ^ sch->handle) == 0 &&
	    (cl = cbq_class_lookup(q, prio)) != NULL)
		return cl;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	for (;;) {
		int result = 0;
		defmap = head->defaults;

		fl = rcu_dereference_bh(head->filter_list);
		/*
		 * Step 2+n. Apply classifier.
		 */
		result = tc_classify(skb, fl, &res, true);
		if (!fl || result < 0)
			goto fallback;

		cl = (void *)res.class;
		if (!cl) {
			if (TC_H_MAJ(res.classid))
				cl = cbq_class_lookup(q, res.classid);
			else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
				cl = defmap[TC_PRIO_BESTEFFORT];

			if (cl == NULL)
				goto fallback;
		}
		if (cl->level >= head->level)
			goto fallback;
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return NULL;
		case TC_ACT_RECLASSIFY:
			return cbq_reclassify(skb, cl);
		}
#endif
		if (cl->level == 0)
			return cl;

		/*
		 * Step 3+n. If the classifier selected a link-sharing class,
		 * apply the agency-specific classifier.
		 * Repeat this procedure until we hit a leaf node.
		 */
		head = cl;
	}

fallback:
	cl = head;

	/*
	 * Step 4. No success...
	 */
	if (TC_H_MAJ(prio) == 0 &&
	    !(cl = head->defaults[prio & TC_PRIO_MAX]) &&
	    !(cl = head->defaults[TC_PRIO_BESTEFFORT]))
		return head;

	return cl;
}

/*
 * A packet has just been enqueued on an empty class.
 * cbq_activate_class adds it to the tail of the active class list
 * of its priority band.
 */
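
/* An illustration of the invariant maintained below: active[prio]
 * points at the tail of a circular singly linked list, so
 * tail->next_alive is the head. E.g. (with hypothetical classes),
 * with A and B already active we have active[prio] = B and
 * B->next_alive = A; activating C then yields active[prio] = C,
 * C->next_alive = A and B->next_alive = C.
 */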

static inline void cbq_activate_class(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	int prio = cl->cpriority;
	struct cbq_class *cl_tail;

	cl_tail = q->active[prio];
	q->active[prio] = cl;

	if (cl_tail != NULL) {
		cl->next_alive = cl_tail->next_alive;
		cl_tail->next_alive = cl;
	} else {
		cl->next_alive = cl;
		q->activemask |= (1<<prio);
	}
}

/*
 * Unlink class from active chain.
 * Note that this same procedure is done directly in cbq_dequeue*
 * during the round-robin procedure.
 */

static void cbq_deactivate_class(struct cbq_class *this)
{
	struct cbq_sched_data *q = qdisc_priv(this->qdisc);
	int prio = this->cpriority;
	struct cbq_class *cl;
	struct cbq_class *cl_prev = q->active[prio];

	do {
		cl = cl_prev->next_alive;
		if (cl == this) {
			cl_prev->next_alive = cl->next_alive;
			cl->next_alive = NULL;

			if (cl == q->active[prio]) {
				q->active[prio] = cl_prev;
				if (cl == q->active[prio]) {
					q->active[prio] = NULL;
					q->activemask &= ~(1<<prio);
					return;
				}
			}
			return;
		}
	} while ((cl_prev = cl) != q->active[prio]);
}

static void
cbq_mark_toplevel(struct cbq_sched_data *q, struct cbq_class *cl)
{
	int toplevel = q->toplevel;

	if (toplevel > cl->level) {
		psched_time_t now = psched_get_time();

		do {
			if (cl->undertime < now) {
				q->toplevel = cl->level;
				return;
			}
		} while ((cl = cl->borrow) != NULL && toplevel > cl->level);
	}
}

static int
cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	int uninitialized_var(ret);
	struct cbq_class *cl = cbq_classify(skb, sch, &ret);

#ifdef CONFIG_NET_CLS_ACT
	q->rx_class = cl;
#endif
	if (cl == NULL) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		kfree_skb(skb);
		return ret;
	}

	ret = qdisc_enqueue(skb, cl->q);
	if (ret == NET_XMIT_SUCCESS) {
		sch->q.qlen++;
		cbq_mark_toplevel(q, cl);
		if (!cl->next_alive)
			cbq_activate_class(cl);
		return ret;
	}

	if (net_xmit_drop_count(ret)) {
		qdisc_qstats_drop(sch);
		cbq_mark_toplevel(q, cl);
		cl->qstats.drops++;
	}
	return ret;
}

/* Overlimit action: penalize leaf class by adding offtime */
static void cbq_overlimit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	psched_tdiff_t delay = cl->undertime - q->now;

	if (!cl->delayed) {
		delay += cl->offtime;

		/*
		 * The class goes to sleep, so that it will have no
		 * chance to work off its avgidle. Let's forgive it 8)
		 *
		 * BTW cbq-2.0 has a bug in this place; apparently
		 * they forgot to shift it by cl->ewma_log.
		 */
		if (cl->avgidle < 0)
			delay -= (-cl->avgidle) - ((-cl->avgidle) >> cl->ewma_log);
		if (cl->avgidle < cl->minidle)
			cl->avgidle = cl->minidle;
		if (delay <= 0)
			delay = 1;
		cl->undertime = q->now + delay;

		cl->xstats.overactions++;
		cl->delayed = 1;
	}
	if (q->wd_expires == 0 || q->wd_expires > delay)
		q->wd_expires = delay;

	/* Dirty work! We must schedule wakeups based on
	 * the real available rate, rather than the leaf rate,
	 * which may be tiny (even zero).
	 */
	if (q->toplevel == TC_CBQ_MAXLEVEL) {
		struct cbq_class *b;
		psched_tdiff_t base_delay = q->wd_expires;

		for (b = cl->borrow; b; b = b->borrow) {
			delay = b->undertime - q->now;
			if (delay < base_delay) {
				if (delay <= 0)
					delay = 1;
				base_delay = delay;
			}
		}

		q->wd_expires = base_delay;
	}
}
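
/* A worked example of the avgidle correction above (numbers are made up
 * for illustration): with cl->avgidle = -1000 and cl->ewma_log = 5, the
 * correction is 1000 - (1000 >> 5) = 1000 - 31 = 969 ticks subtracted
 * from the delay, i.e. roughly (1 - 2^-ewma_log) * (-avgidle), matching
 * the idle-time formula used in cbq_update().
 */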
444
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700445static psched_tdiff_t cbq_undelay_prio(struct cbq_sched_data *q, int prio,
446 psched_time_t now)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447{
448 struct cbq_class *cl;
449 struct cbq_class *cl_prev = q->active[prio];
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700450 psched_time_t sched = now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451
452 if (cl_prev == NULL)
Patrick McHardye9054a32007-03-16 01:21:40 -0700453 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700454
455 do {
456 cl = cl_prev->next_alive;
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700457 if (now - cl->penalized > 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700458 cl_prev->next_alive = cl->next_alive;
459 cl->next_alive = NULL;
460 cl->cpriority = cl->priority;
461 cl->delayed = 0;
462 cbq_activate_class(cl);
463
464 if (cl == q->active[prio]) {
465 q->active[prio] = cl_prev;
466 if (cl == q->active[prio]) {
467 q->active[prio] = NULL;
468 return 0;
469 }
470 }
471
472 cl = cl_prev->next_alive;
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700473 } else if (sched - cl->penalized > 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700474 sched = cl->penalized;
475 } while ((cl_prev = cl) != q->active[prio]);
476
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700477 return sched - now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700478}
479
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700480static enum hrtimer_restart cbq_undelay(struct hrtimer *timer)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700481{
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700482 struct cbq_sched_data *q = container_of(timer, struct cbq_sched_data,
David S. Miller2fbd3da2009-09-01 17:59:25 -0700483 delay_timer);
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700484 struct Qdisc *sch = q->watchdog.qdisc;
485 psched_time_t now;
486 psched_tdiff_t delay = 0;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000487 unsigned int pmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700488
Patrick McHardy3bebcda2007-03-23 11:29:25 -0700489 now = psched_get_time();
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700490
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491 pmask = q->pmask;
492 q->pmask = 0;
493
494 while (pmask) {
495 int prio = ffz(~pmask);
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700496 psched_tdiff_t tmp;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700497
498 pmask &= ~(1<<prio);
499
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700500 tmp = cbq_undelay_prio(q, prio, now);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700501 if (tmp > 0) {
502 q->pmask |= 1<<prio;
503 if (tmp < delay || delay == 0)
504 delay = tmp;
505 }
506 }
507
508 if (delay) {
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700509 ktime_t time;
510
511 time = ktime_set(0, 0);
Jarek Poplawskica44d6e2009-06-15 02:31:47 -0700512 time = ktime_add_ns(time, PSCHED_TICKS2NS(now + delay));
Eric Dumazet4a8e3202014-09-20 18:01:30 -0700513 hrtimer_start(&q->delay_timer, time, HRTIMER_MODE_ABS_PINNED);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700514 }
515
Eric Dumazetfd245a42011-01-20 05:27:16 +0000516 qdisc_unthrottled(sch);
David S. Miller8608db02008-08-18 20:51:18 -0700517 __netif_schedule(qdisc_root(sch));
Patrick McHardy1a13cb62007-03-16 01:22:20 -0700518 return HRTIMER_NORESTART;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700519}
520
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900521/*
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000522 * It is mission critical procedure.
523 *
524 * We "regenerate" toplevel cutoff, if transmitting class
525 * has backlog and it is not regulated. It is not part of
526 * original CBQ description, but looks more reasonable.
527 * Probably, it is wrong. This question needs further investigation.
528 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000530static inline void
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531cbq_update_toplevel(struct cbq_sched_data *q, struct cbq_class *cl,
532 struct cbq_class *borrowed)
533{
534 if (cl && q->toplevel >= borrowed->level) {
535 if (cl->q->q.qlen > 1) {
536 do {
Patrick McHardya0849802007-03-23 11:28:30 -0700537 if (borrowed->undertime == PSCHED_PASTPERFECT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700538 q->toplevel = borrowed->level;
539 return;
540 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000541 } while ((borrowed = borrowed->borrow) != NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542 }
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +0900543#if 0
Linus Torvalds1da177e2005-04-16 15:20:36 -0700544 /* It is not necessary now. Uncommenting it
545 will save CPU cycles, but decrease fairness.
546 */
547 q->toplevel = TC_CBQ_MAXLEVEL;
548#endif
549 }
550}
551
552static void
553cbq_update(struct cbq_sched_data *q)
554{
555 struct cbq_class *this = q->tx_class;
556 struct cbq_class *cl = this;
557 int len = q->tx_len;
Vasily Averin73d0f372014-08-14 12:27:47 +0400558 psched_time_t now;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700559
560 q->tx_class = NULL;
Vasily Averin73d0f372014-08-14 12:27:47 +0400561 /* Time integrator. We calculate EOS time
562 * by adding expected packet transmission time.
563 */
564 now = q->now + L2T(&q->link, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565
566 for ( ; cl; cl = cl->share) {
567 long avgidle = cl->avgidle;
568 long idle;
569
570 cl->bstats.packets++;
571 cl->bstats.bytes += len;
572
573 /*
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000574 * (now - last) is total time between packet right edges.
575 * (last_pktlen/rate) is "virtual" busy time, so that
576 *
577 * idle = (now - last) - last_pktlen/rate
Linus Torvalds1da177e2005-04-16 15:20:36 -0700578 */
579
Vasily Averin73d0f372014-08-14 12:27:47 +0400580 idle = now - cl->last;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700581 if ((unsigned long)idle > 128*1024*1024) {
582 avgidle = cl->maxidle;
583 } else {
584 idle -= L2T(cl, len);
585
586 /* true_avgidle := (1-W)*true_avgidle + W*idle,
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000587 * where W=2^{-ewma_log}. But cl->avgidle is scaled:
588 * cl->avgidle == true_avgidle/W,
589 * hence:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590 */
			avgidle += idle - (avgidle>>cl->ewma_log);
		}

		if (avgidle <= 0) {
			/* Overlimit or at-limit */

			if (avgidle < cl->minidle)
				avgidle = cl->minidle;

			cl->avgidle = avgidle;

			/* Calculate the expected time when this class
			 * will be allowed to send.
			 * It will occur when:
			 * (1-W)*true_avgidle + W*delay = 0, i.e.
			 * idle = (1/W - 1)*(-true_avgidle)
			 * or
			 * idle = (1 - W)*(-cl->avgidle);
			 */
			idle = (-avgidle) - ((-avgidle) >> cl->ewma_log);

			/*
			 * That is not all.
			 * To maintain the rate allocated to the class,
			 * we add to undertime the virtual clock time
			 * necessary to complete the transmitted packet.
			 * (len/phys_bandwidth has already elapsed by
			 * the moment cbq_update runs.)
			 */

			idle -= L2T(&q->link, len);
			idle += L2T(cl, len);

			cl->undertime = now + idle;
		} else {
			/* Underlimit */

			cl->undertime = PSCHED_PASTPERFECT;
			if (avgidle > cl->maxidle)
				cl->avgidle = cl->maxidle;
			else
				cl->avgidle = avgidle;
		}
		if ((s64)(now - cl->last) > 0)
			cl->last = now;
	}

	cbq_update_toplevel(q, this, q->tx_borrowed);
}

static inline struct cbq_class *
cbq_under_limit(struct cbq_class *cl)
{
	struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
	struct cbq_class *this_cl = cl;

	if (cl->tparent == NULL)
		return cl;

	if (cl->undertime == PSCHED_PASTPERFECT || q->now >= cl->undertime) {
		cl->delayed = 0;
		return cl;
	}

	do {
		/* This is a very suspicious place. Currently the
		 * overlimit action is taken for non-bounded classes
		 * only if the link is completely congested.
		 * Though this agrees with the ancestor-only paradigm,
		 * it looks very stupid. In particular,
		 * it means that this chunk of code will either
		 * never be called or result in strong amplification
		 * of burstiness. Dangerous, silly, and yet
		 * no other solution exists.
		 */
		cl = cl->borrow;
		if (!cl) {
			this_cl->qstats.overlimits++;
			cbq_overlimit(this_cl);
			return NULL;
		}
		if (cl->level > q->toplevel)
			return NULL;
	} while (cl->undertime != PSCHED_PASTPERFECT && q->now < cl->undertime);

	cl->delayed = 0;
	return cl;
}

static inline struct sk_buff *
cbq_dequeue_prio(struct Qdisc *sch, int prio)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct cbq_class *cl_tail, *cl_prev, *cl;
	struct sk_buff *skb;
	int deficit;

	cl_tail = cl_prev = q->active[prio];
	cl = cl_prev->next_alive;

	do {
		deficit = 0;

		/* Start round */
		do {
			struct cbq_class *borrow = cl;

			if (cl->q->q.qlen &&
			    (borrow = cbq_under_limit(cl)) == NULL)
				goto skip_class;

			if (cl->deficit <= 0) {
				/* Class exhausted its allotment per
				 * this round. Switch to the next one.
				 */
				deficit = 1;
				cl->deficit += cl->quantum;
				goto next_class;
			}

			skb = cl->q->dequeue(cl->q);

			/* Class did not give us any skb :-(
			 * It could occur even if cl->q->q.qlen != 0,
			 * e.g. if cl->q == "tbf"
			 */
			if (skb == NULL)
				goto skip_class;

			cl->deficit -= qdisc_pkt_len(skb);
			q->tx_class = cl;
			q->tx_borrowed = borrow;
			if (borrow != cl) {
#ifndef CBQ_XSTATS_BORROWS_BYTES
				borrow->xstats.borrows++;
				cl->xstats.borrows++;
#else
				borrow->xstats.borrows += qdisc_pkt_len(skb);
				cl->xstats.borrows += qdisc_pkt_len(skb);
#endif
			}
			q->tx_len = qdisc_pkt_len(skb);

			if (cl->deficit <= 0) {
				q->active[prio] = cl;
				cl = cl->next_alive;
				cl->deficit += cl->quantum;
			}
			return skb;

skip_class:
			if (cl->q->q.qlen == 0 || prio != cl->cpriority) {
				/* Class is empty or penalized.
				 * Unlink it from active chain.
				 */
				cl_prev->next_alive = cl->next_alive;
				cl->next_alive = NULL;

				/* Did cl_tail point to it? */
				if (cl == cl_tail) {
					/* Repair it! */
					cl_tail = cl_prev;

					/* Was it the last class in this band? */
					if (cl == cl_tail) {
						/* Kill the band! */
						q->active[prio] = NULL;
						q->activemask &= ~(1<<prio);
						if (cl->q->q.qlen)
							cbq_activate_class(cl);
						return NULL;
					}

					q->active[prio] = cl_tail;
				}
				if (cl->q->q.qlen)
					cbq_activate_class(cl);

				cl = cl_prev;
			}

next_class:
			cl_prev = cl;
			cl = cl->next_alive;
		} while (cl_prev != cl_tail);
	} while (deficit);

	q->active[prio] = cl_prev;

	return NULL;
}

static inline struct sk_buff *
cbq_dequeue_1(struct Qdisc *sch)
{
	struct cbq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int activemask;

	activemask = q->activemask & 0xFF;
	while (activemask) {
		int prio = ffz(~activemask);
		activemask &= ~(1<<prio);
		skb = cbq_dequeue_prio(sch, prio);
		if (skb)
			return skb;
	}
	return NULL;
}

static struct sk_buff *
cbq_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct cbq_sched_data *q = qdisc_priv(sch);
	psched_time_t now;

	now = psched_get_time();

	if (q->tx_class)
		cbq_update(q);

	q->now = now;

	for (;;) {
		q->wd_expires = 0;

		skb = cbq_dequeue_1(sch);
		if (skb) {
			qdisc_bstats_update(sch, skb);
			sch->q.qlen--;
			qdisc_unthrottled(sch);
			return skb;
		}

		/* All the classes are overlimit.
		 *
		 * This is possible if:
		 *
		 * 1. The scheduler is empty.
		 * 2. The toplevel cutoff inhibited borrowing.
		 * 3. The root class is overlimit.
		 *
		 * Reset the 2nd and 3rd conditions and retry.
		 *
		 * Note that NS and cbq-2.0 are buggy: peeking
		 * an arbitrary class is appropriate for ancestor-only
		 * sharing, but not for the toplevel algorithm.
		 *
		 * Our version is better, but slower, because it requires
		 * two passes; that is unavoidable with top-level sharing.
		 */

		if (q->toplevel == TC_CBQ_MAXLEVEL &&
		    q->link.undertime == PSCHED_PASTPERFECT)
			break;

		q->toplevel = TC_CBQ_MAXLEVEL;
		q->link.undertime = PSCHED_PASTPERFECT;
	}

	/* No packets in scheduler or nobody wants to give them to us :-(
	 * Sigh... start the watchdog timer in the last case.
	 */

	if (sch->q.qlen) {
		qdisc_qstats_overlimit(sch);
		if (q->wd_expires)
			qdisc_watchdog_schedule(&q->watchdog,
						now + q->wd_expires);
	}
	return NULL;
}

/* CBQ class maintenance routines */

static void cbq_adjust_levels(struct cbq_class *this)
{
	if (this == NULL)
		return;

	do {
		int level = 0;
		struct cbq_class *cl;

		cl = this->children;
		if (cl) {
			do {
				if (cl->level > level)
					level = cl->level;
			} while ((cl = cl->sibling) != this->children);
		}
		this->level = level + 1;
	} while ((this = this->tparent) != NULL);
}

static void cbq_normalize_quanta(struct cbq_sched_data *q, int prio)
{
	struct cbq_class *cl;
	unsigned int h;

	if (q->quanta[prio] == 0)
		return;

	for (h = 0; h < q->clhash.hashsize; h++) {
		hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
			/* BUGGGG... Beware! This expression suffers from
			 * arithmetic overflows!
			 */
			if (cl->priority == prio) {
				cl->quantum = (cl->weight*cl->allot*q->nclasses[prio])/
					q->quanta[prio];
			}
			if (cl->quantum <= 0 ||
			    cl->quantum > 32*qdisc_dev(cl->qdisc)->mtu) {
				pr_warn("CBQ: class %08x has bad quantum==%ld, repaired.\n",
					cl->common.classid, cl->quantum);
				cl->quantum = qdisc_dev(cl->qdisc)->mtu/2 + 1;
			}
		}
	}
}
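
/* A quick sanity check of the normalization above with made-up numbers:
 * two classes at prio 1 with weights 1 and 3 and allot 1514, so
 * q->nclasses[1] = 2 and q->quanta[1] = 4. The weight-1 class gets
 * quantum = (1*1514*2)/4 = 757 and the weight-3 class gets
 * (3*1514*2)/4 = 2271, i.e. quanta in a 1:3 ratio averaging the allot.
 */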
913
914static void cbq_sync_defmap(struct cbq_class *cl)
915{
916 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
917 struct cbq_class *split = cl->split;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000918 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 int i;
920
921 if (split == NULL)
922 return;
923
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000924 for (i = 0; i <= TC_PRIO_MAX; i++) {
925 if (split->defaults[i] == cl && !(cl->defmap & (1<<i)))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926 split->defaults[i] = NULL;
927 }
928
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000929 for (i = 0; i <= TC_PRIO_MAX; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700930 int level = split->level;
931
932 if (split->defaults[i])
933 continue;
934
Patrick McHardyd77fea22008-07-05 23:22:05 -0700935 for (h = 0; h < q->clhash.hashsize; h++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700936 struct cbq_class *c;
937
Sasha Levinb67bfe02013-02-27 17:06:00 -0800938 hlist_for_each_entry(c, &q->clhash.hash[h],
Patrick McHardyd77fea22008-07-05 23:22:05 -0700939 common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 if (c->split == split && c->level < level &&
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000941 c->defmap & (1<<i)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942 split->defaults[i] = c;
943 level = c->level;
944 }
945 }
946 }
947 }
948}
949
950static void cbq_change_defmap(struct cbq_class *cl, u32 splitid, u32 def, u32 mask)
951{
952 struct cbq_class *split = NULL;
953
954 if (splitid == 0) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000955 split = cl->split;
956 if (!split)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957 return;
Patrick McHardyd77fea22008-07-05 23:22:05 -0700958 splitid = split->common.classid;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 }
960
Patrick McHardyd77fea22008-07-05 23:22:05 -0700961 if (split == NULL || split->common.classid != splitid) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 for (split = cl->tparent; split; split = split->tparent)
Patrick McHardyd77fea22008-07-05 23:22:05 -0700963 if (split->common.classid == splitid)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964 break;
965 }
966
967 if (split == NULL)
968 return;
969
970 if (cl->split != split) {
971 cl->defmap = 0;
972 cbq_sync_defmap(cl);
973 cl->split = split;
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000974 cl->defmap = def & mask;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700975 } else
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000976 cl->defmap = (cl->defmap & ~mask) | (def & mask);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700977
978 cbq_sync_defmap(cl);
979}
980
981static void cbq_unlink_class(struct cbq_class *this)
982{
983 struct cbq_class *cl, **clp;
984 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
985
Patrick McHardyd77fea22008-07-05 23:22:05 -0700986 qdisc_class_hash_remove(&q->clhash, &this->common);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987
988 if (this->tparent) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +0000989 clp = &this->sibling;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990 cl = *clp;
991 do {
992 if (cl == this) {
993 *clp = cl->sibling;
994 break;
995 }
996 clp = &cl->sibling;
997 } while ((cl = *clp) != this->sibling);
998
999 if (this->tparent->children == this) {
1000 this->tparent->children = this->sibling;
1001 if (this->sibling == this)
1002 this->tparent->children = NULL;
1003 }
1004 } else {
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001005 WARN_ON(this->sibling != this);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 }
1007}
1008
1009static void cbq_link_class(struct cbq_class *this)
1010{
1011 struct cbq_sched_data *q = qdisc_priv(this->qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 struct cbq_class *parent = this->tparent;
1013
1014 this->sibling = this;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001015 qdisc_class_hash_insert(&q->clhash, &this->common);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016
1017 if (parent == NULL)
1018 return;
1019
1020 if (parent->children == NULL) {
1021 parent->children = this;
1022 } else {
1023 this->sibling = parent->children->sibling;
1024 parent->children->sibling = this;
1025 }
1026}
1027
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028static void
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001029cbq_reset(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001030{
1031 struct cbq_sched_data *q = qdisc_priv(sch);
1032 struct cbq_class *cl;
1033 int prio;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001034 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035
1036 q->activemask = 0;
1037 q->pmask = 0;
1038 q->tx_class = NULL;
1039 q->tx_borrowed = NULL;
Patrick McHardy88a99352007-03-16 01:21:11 -07001040 qdisc_watchdog_cancel(&q->watchdog);
David S. Miller2fbd3da2009-09-01 17:59:25 -07001041 hrtimer_cancel(&q->delay_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 q->toplevel = TC_CBQ_MAXLEVEL;
Patrick McHardy3bebcda2007-03-23 11:29:25 -07001043 q->now = psched_get_time();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044
1045 for (prio = 0; prio <= TC_CBQ_MAXPRIO; prio++)
1046 q->active[prio] = NULL;
1047
Patrick McHardyd77fea22008-07-05 23:22:05 -07001048 for (h = 0; h < q->clhash.hashsize; h++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08001049 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001050 qdisc_reset(cl->q);
1051
1052 cl->next_alive = NULL;
Patrick McHardya0849802007-03-23 11:28:30 -07001053 cl->undertime = PSCHED_PASTPERFECT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 cl->avgidle = cl->maxidle;
1055 cl->deficit = cl->quantum;
1056 cl->cpriority = cl->priority;
1057 }
1058 }
1059 sch->q.qlen = 0;
1060}
1061
1062
1063static int cbq_set_lss(struct cbq_class *cl, struct tc_cbq_lssopt *lss)
1064{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001065 if (lss->change & TCF_CBQ_LSS_FLAGS) {
1066 cl->share = (lss->flags & TCF_CBQ_LSS_ISOLATED) ? NULL : cl->tparent;
1067 cl->borrow = (lss->flags & TCF_CBQ_LSS_BOUNDED) ? NULL : cl->tparent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001068 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001069 if (lss->change & TCF_CBQ_LSS_EWMA)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001070 cl->ewma_log = lss->ewma_log;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001071 if (lss->change & TCF_CBQ_LSS_AVPKT)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001072 cl->avpkt = lss->avpkt;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001073 if (lss->change & TCF_CBQ_LSS_MINIDLE)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074 cl->minidle = -(long)lss->minidle;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001075 if (lss->change & TCF_CBQ_LSS_MAXIDLE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001076 cl->maxidle = lss->maxidle;
1077 cl->avgidle = lss->maxidle;
1078 }
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001079 if (lss->change & TCF_CBQ_LSS_OFFTIME)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080 cl->offtime = lss->offtime;
1081 return 0;
1082}
1083
1084static void cbq_rmprio(struct cbq_sched_data *q, struct cbq_class *cl)
1085{
1086 q->nclasses[cl->priority]--;
1087 q->quanta[cl->priority] -= cl->weight;
1088 cbq_normalize_quanta(q, cl->priority);
1089}
1090
1091static void cbq_addprio(struct cbq_sched_data *q, struct cbq_class *cl)
1092{
1093 q->nclasses[cl->priority]++;
1094 q->quanta[cl->priority] += cl->weight;
1095 cbq_normalize_quanta(q, cl->priority);
1096}
1097
1098static int cbq_set_wrr(struct cbq_class *cl, struct tc_cbq_wrropt *wrr)
1099{
1100 struct cbq_sched_data *q = qdisc_priv(cl->qdisc);
1101
1102 if (wrr->allot)
1103 cl->allot = wrr->allot;
1104 if (wrr->weight)
1105 cl->weight = wrr->weight;
1106 if (wrr->priority) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001107 cl->priority = wrr->priority - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108 cl->cpriority = cl->priority;
1109 if (cl->priority >= cl->priority2)
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001110 cl->priority2 = TC_CBQ_MAXPRIO - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001111 }
1112
1113 cbq_addprio(q, cl);
1114 return 0;
1115}
1116
Linus Torvalds1da177e2005-04-16 15:20:36 -07001117static int cbq_set_fopt(struct cbq_class *cl, struct tc_cbq_fopt *fopt)
1118{
1119 cbq_change_defmap(cl, fopt->split, fopt->defmap, fopt->defchange);
1120 return 0;
1121}
1122
Patrick McHardy27a34212008-01-23 20:35:39 -08001123static const struct nla_policy cbq_policy[TCA_CBQ_MAX + 1] = {
1124 [TCA_CBQ_LSSOPT] = { .len = sizeof(struct tc_cbq_lssopt) },
1125 [TCA_CBQ_WRROPT] = { .len = sizeof(struct tc_cbq_wrropt) },
1126 [TCA_CBQ_FOPT] = { .len = sizeof(struct tc_cbq_fopt) },
1127 [TCA_CBQ_OVL_STRATEGY] = { .len = sizeof(struct tc_cbq_ovl) },
1128 [TCA_CBQ_RATE] = { .len = sizeof(struct tc_ratespec) },
1129 [TCA_CBQ_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1130 [TCA_CBQ_POLICE] = { .len = sizeof(struct tc_cbq_police) },
1131};
1132
Patrick McHardy1e904742008-01-22 22:11:17 -08001133static int cbq_init(struct Qdisc *sch, struct nlattr *opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134{
1135 struct cbq_sched_data *q = qdisc_priv(sch);
Patrick McHardy1e904742008-01-22 22:11:17 -08001136 struct nlattr *tb[TCA_CBQ_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001137 struct tc_ratespec *r;
Patrick McHardycee63722008-01-23 20:33:32 -08001138 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001139
Patrick McHardy27a34212008-01-23 20:35:39 -08001140 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
Patrick McHardycee63722008-01-23 20:33:32 -08001141 if (err < 0)
1142 return err;
1143
Patrick McHardy27a34212008-01-23 20:35:39 -08001144 if (tb[TCA_CBQ_RTAB] == NULL || tb[TCA_CBQ_RATE] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001145 return -EINVAL;
1146
Patrick McHardy1e904742008-01-22 22:11:17 -08001147 r = nla_data(tb[TCA_CBQ_RATE]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148
Patrick McHardy1e904742008-01-22 22:11:17 -08001149 if ((q->link.R_tab = qdisc_get_rtab(r, tb[TCA_CBQ_RTAB])) == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150 return -EINVAL;
1151
Patrick McHardyd77fea22008-07-05 23:22:05 -07001152 err = qdisc_class_hash_init(&q->clhash);
1153 if (err < 0)
1154 goto put_rtab;
1155
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 q->link.refcnt = 1;
1157 q->link.sibling = &q->link;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001158 q->link.common.classid = sch->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 q->link.qdisc = sch;
Changli Gao3511c912010-10-16 13:04:08 +00001160 q->link.q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1161 sch->handle);
1162 if (!q->link.q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163 q->link.q = &noop_qdisc;
1164
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001165 q->link.priority = TC_CBQ_MAXPRIO - 1;
1166 q->link.priority2 = TC_CBQ_MAXPRIO - 1;
1167 q->link.cpriority = TC_CBQ_MAXPRIO - 1;
David S. Miller5ce2d482008-07-08 17:06:30 -07001168 q->link.allot = psched_mtu(qdisc_dev(sch));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 q->link.quantum = q->link.allot;
1170 q->link.weight = q->link.R_tab->rate.rate;
1171
1172 q->link.ewma_log = TC_CBQ_DEF_EWMA;
1173 q->link.avpkt = q->link.allot/2;
1174 q->link.minidle = -0x7FFFFFFF;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001175
Patrick McHardy88a99352007-03-16 01:21:11 -07001176 qdisc_watchdog_init(&q->watchdog, sch);
Eric Dumazet4a8e3202014-09-20 18:01:30 -07001177 hrtimer_init(&q->delay_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 q->delay_timer.function = cbq_undelay;
1179 q->toplevel = TC_CBQ_MAXLEVEL;
Patrick McHardy3bebcda2007-03-23 11:29:25 -07001180 q->now = psched_get_time();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181
1182 cbq_link_class(&q->link);
1183
Patrick McHardy1e904742008-01-22 22:11:17 -08001184 if (tb[TCA_CBQ_LSSOPT])
1185 cbq_set_lss(&q->link, nla_data(tb[TCA_CBQ_LSSOPT]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186
1187 cbq_addprio(q, &q->link);
1188 return 0;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001189
1190put_rtab:
1191 qdisc_put_rtab(q->link.R_tab);
1192 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193}
1194
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001195static int cbq_dump_rate(struct sk_buff *skb, struct cbq_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001196{
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001197 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198
David S. Miller1b34ec42012-03-29 05:11:39 -04001199 if (nla_put(skb, TCA_CBQ_RATE, sizeof(cl->R_tab->rate), &cl->R_tab->rate))
1200 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001201 return skb->len;
1202
Patrick McHardy1e904742008-01-22 22:11:17 -08001203nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001204 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205 return -1;
1206}
1207
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001208static int cbq_dump_lss(struct sk_buff *skb, struct cbq_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209{
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001210 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001211 struct tc_cbq_lssopt opt;
1212
1213 opt.flags = 0;
1214 if (cl->borrow == NULL)
1215 opt.flags |= TCF_CBQ_LSS_BOUNDED;
1216 if (cl->share == NULL)
1217 opt.flags |= TCF_CBQ_LSS_ISOLATED;
1218 opt.ewma_log = cl->ewma_log;
1219 opt.level = cl->level;
1220 opt.avpkt = cl->avpkt;
1221 opt.maxidle = cl->maxidle;
1222 opt.minidle = (u32)(-cl->minidle);
1223 opt.offtime = cl->offtime;
1224 opt.change = ~0;
David S. Miller1b34ec42012-03-29 05:11:39 -04001225 if (nla_put(skb, TCA_CBQ_LSSOPT, sizeof(opt), &opt))
1226 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 return skb->len;
1228
Patrick McHardy1e904742008-01-22 22:11:17 -08001229nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001230 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001231 return -1;
1232}
1233
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001234static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235{
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001236 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001237 struct tc_cbq_wrropt opt;
1238
David S. Millera0db8562013-07-30 00:16:21 -07001239 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001240 opt.flags = 0;
1241 opt.allot = cl->allot;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001242 opt.priority = cl->priority + 1;
1243 opt.cpriority = cl->cpriority + 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001244 opt.weight = cl->weight;
David S. Miller1b34ec42012-03-29 05:11:39 -04001245 if (nla_put(skb, TCA_CBQ_WRROPT, sizeof(opt), &opt))
1246 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001247 return skb->len;
1248
Patrick McHardy1e904742008-01-22 22:11:17 -08001249nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001250 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001251 return -1;
1252}
1253
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001254static int cbq_dump_fopt(struct sk_buff *skb, struct cbq_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001255{
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07001256 unsigned char *b = skb_tail_pointer(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 struct tc_cbq_fopt opt;
1258
1259 if (cl->split || cl->defmap) {
Patrick McHardyd77fea22008-07-05 23:22:05 -07001260 opt.split = cl->split ? cl->split->common.classid : 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001261 opt.defmap = cl->defmap;
1262 opt.defchange = ~0;
David S. Miller1b34ec42012-03-29 05:11:39 -04001263 if (nla_put(skb, TCA_CBQ_FOPT, sizeof(opt), &opt))
1264 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265 }
1266 return skb->len;
1267
Patrick McHardy1e904742008-01-22 22:11:17 -08001268nla_put_failure:
Arnaldo Carvalho de Melodc5fc572007-03-25 23:06:12 -07001269 nlmsg_trim(skb, b);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001270 return -1;
1271}
1272
Linus Torvalds1da177e2005-04-16 15:20:36 -07001273static int cbq_dump_attr(struct sk_buff *skb, struct cbq_class *cl)
1274{
1275 if (cbq_dump_lss(skb, cl) < 0 ||
1276 cbq_dump_rate(skb, cl) < 0 ||
1277 cbq_dump_wrr(skb, cl) < 0 ||
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 cbq_dump_fopt(skb, cl) < 0)
1279 return -1;
1280 return 0;
1281}
1282
1283static int cbq_dump(struct Qdisc *sch, struct sk_buff *skb)
1284{
1285 struct cbq_sched_data *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001286 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001288 nest = nla_nest_start(skb, TCA_OPTIONS);
1289 if (nest == NULL)
1290 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 if (cbq_dump_attr(skb, &q->link) < 0)
Patrick McHardy1e904742008-01-22 22:11:17 -08001292 goto nla_put_failure;
Yang Yingliangd59b7d82014-03-12 10:20:32 +08001293 return nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001294
Patrick McHardy1e904742008-01-22 22:11:17 -08001295nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001296 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297 return -1;
1298}
1299
1300static int
1301cbq_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
1302{
1303 struct cbq_sched_data *q = qdisc_priv(sch);
1304
1305 q->link.xstats.avgidle = q->link.avgidle;
1306 return gnet_stats_copy_app(d, &q->link.xstats, sizeof(q->link.xstats));
1307}
1308
1309static int
1310cbq_dump_class(struct Qdisc *sch, unsigned long arg,
1311 struct sk_buff *skb, struct tcmsg *tcm)
1312{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001313 struct cbq_class *cl = (struct cbq_class *)arg;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001314 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
1316 if (cl->tparent)
Patrick McHardyd77fea22008-07-05 23:22:05 -07001317 tcm->tcm_parent = cl->tparent->common.classid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318 else
1319 tcm->tcm_parent = TC_H_ROOT;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001320 tcm->tcm_handle = cl->common.classid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 tcm->tcm_info = cl->q->handle;
1322
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001323 nest = nla_nest_start(skb, TCA_OPTIONS);
1324 if (nest == NULL)
1325 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 if (cbq_dump_attr(skb, cl) < 0)
Patrick McHardy1e904742008-01-22 22:11:17 -08001327 goto nla_put_failure;
Yang Yingliangd59b7d82014-03-12 10:20:32 +08001328 return nla_nest_end(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329
Patrick McHardy1e904742008-01-22 22:11:17 -08001330nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001331 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 return -1;
1333}
1334
1335static int
1336cbq_dump_class_stats(struct Qdisc *sch, unsigned long arg,
1337 struct gnet_dump *d)
1338{
1339 struct cbq_sched_data *q = qdisc_priv(sch);
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001340 struct cbq_class *cl = (struct cbq_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 cl->xstats.avgidle = cl->avgidle;
1343 cl->xstats.undertime = 0;
1344
Patrick McHardya0849802007-03-23 11:28:30 -07001345 if (cl->undertime != PSCHED_PASTPERFECT)
Patrick McHardy8edc0c32007-03-23 11:28:55 -07001346 cl->xstats.undertime = cl->undertime - q->now;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001347
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001348 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1349 d, NULL, &cl->bstats) < 0 ||
Eric Dumazetd250a5f2009-10-02 10:32:18 +00001350 gnet_stats_copy_rate_est(d, &cl->bstats, &cl->rate_est) < 0 ||
John Fastabendb0ab6f92014-09-28 11:54:24 -07001351 gnet_stats_copy_queue(d, NULL, &cl->qstats, cl->q->q.qlen) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352 return -1;
1353
1354 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1355}
1356
1357static int cbq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1358 struct Qdisc **old)
1359{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001360 struct cbq_class *cl = (struct cbq_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001361
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001362 if (new == NULL) {
Changli Gao3511c912010-10-16 13:04:08 +00001363 new = qdisc_create_dflt(sch->dev_queue,
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001364 &pfifo_qdisc_ops, cl->common.classid);
1365 if (new == NULL)
1366 return -ENOBUFS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 }
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001368
WANG Cong86a79962016-02-25 14:55:00 -08001369 *old = qdisc_replace(sch, new, &cl->q);
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001370 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371}
1372
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001373static struct Qdisc *cbq_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001374{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001375 struct cbq_class *cl = (struct cbq_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001377 return cl->q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378}
1379
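/* cops->qlen_notify: a child queue has drained; once the class holds no
 * packets, take it out of the active (weighted round-robin) lists.
 */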
Jarek Poplawskia37ef2e2006-12-08 00:25:55 -08001380static void cbq_qlen_notify(struct Qdisc *sch, unsigned long arg)
1381{
1382 struct cbq_class *cl = (struct cbq_class *)arg;
1383
1384 if (cl->q->q.qlen == 0)
1385 cbq_deactivate_class(cl);
1386}
1387
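/* Class lifetime is reference counted: cbq_get() takes a reference on
 * behalf of the core (cops->get()), cbq_put() drops it, and the class
 * is freed via cbq_destroy_class() once the count reaches zero.
 */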
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388static unsigned long cbq_get(struct Qdisc *sch, u32 classid)
1389{
1390 struct cbq_sched_data *q = qdisc_priv(sch);
1391 struct cbq_class *cl = cbq_class_lookup(q, classid);
1392
1393 if (cl) {
1394 cl->refcnt++;
1395 return (unsigned long)cl;
1396 }
1397 return 0;
1398}
1399
Linus Torvalds1da177e2005-04-16 15:20:36 -07001400static void cbq_destroy_class(struct Qdisc *sch, struct cbq_class *cl)
1401{
1402 struct cbq_sched_data *q = qdisc_priv(sch);
1403
Ilpo Järvinen547b7922008-07-25 21:43:18 -07001404 WARN_ON(cl->filters);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405
Patrick McHardyff31ab52008-07-01 19:52:38 -07001406 tcf_destroy_chain(&cl->filter_list);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 qdisc_destroy(cl->q);
1408 qdisc_put_rtab(cl->R_tab);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 gen_kill_estimator(&cl->bstats, &cl->rate_est);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001410 if (cl != &q->link)
1411 kfree(cl);
1412}
1413
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001414static void cbq_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415{
1416 struct cbq_sched_data *q = qdisc_priv(sch);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001417 struct hlist_node *next;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 struct cbq_class *cl;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001419 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001420
Patrick McHardyc3bc7cf2007-07-15 00:03:05 -07001421#ifdef CONFIG_NET_CLS_ACT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422 q->rx_class = NULL;
1423#endif
1424 /*
1425 * Filters must be destroyed first because we don't destroy the
1426 * classes from root to leaves, which means that filters can still
1427 * be bound to classes that have already been destroyed. --TGR '04
1428 */
Patrick McHardyd77fea22008-07-05 23:22:05 -07001429 for (h = 0; h < q->clhash.hashsize; h++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08001430 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode)
Patrick McHardyff31ab52008-07-01 19:52:38 -07001431 tcf_destroy_chain(&cl->filter_list);
Patrick McHardyb00b4bf2007-06-05 16:06:59 -07001432 }
Patrick McHardyd77fea22008-07-05 23:22:05 -07001433 for (h = 0; h < q->clhash.hashsize; h++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08001434 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[h],
Patrick McHardyd77fea22008-07-05 23:22:05 -07001435 common.hnode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436 cbq_destroy_class(sch, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 }
Patrick McHardyd77fea22008-07-05 23:22:05 -07001438 qdisc_class_hash_destroy(&q->clhash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439}
1440
1441static void cbq_put(struct Qdisc *sch, unsigned long arg)
1442{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001443 struct cbq_class *cl = (struct cbq_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444
1445 if (--cl->refcnt == 0) {
Patrick McHardyc3bc7cf2007-07-15 00:03:05 -07001446#ifdef CONFIG_NET_CLS_ACT
Jarek Poplawski102396a2008-08-29 14:21:52 -07001447 spinlock_t *root_lock = qdisc_root_sleeping_lock(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 struct cbq_sched_data *q = qdisc_priv(sch);
1449
David S. Miller7698b4f2008-07-16 01:42:40 -07001450 spin_lock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001451 if (q->rx_class == cl)
1452 q->rx_class = NULL;
David S. Miller7698b4f2008-07-16 01:42:40 -07001453 spin_unlock_bh(root_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454#endif
1455
1456 cbq_destroy_class(sch, cl);
1457 }
1458}
1459
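/* cops->change: modify an existing class (*arg != 0) or create a new
 * one. A new class inherits allot/quantum from its parent and falls
 * back to the link (root) defaults for ewma_log, maxidle and avpkt
 * when those are not supplied in the netlink attributes.
 */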
1460static int
Patrick McHardy1e904742008-01-22 22:11:17 -08001461cbq_change_class(struct Qdisc *sch, u32 classid, u32 parentid, struct nlattr **tca,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001462 unsigned long *arg)
1463{
1464 int err;
1465 struct cbq_sched_data *q = qdisc_priv(sch);
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001466 struct cbq_class *cl = (struct cbq_class *)*arg;
Patrick McHardy1e904742008-01-22 22:11:17 -08001467 struct nlattr *opt = tca[TCA_OPTIONS];
1468 struct nlattr *tb[TCA_CBQ_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 struct cbq_class *parent;
1470 struct qdisc_rate_table *rtab = NULL;
1471
Patrick McHardycee63722008-01-23 20:33:32 -08001472 if (opt == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 return -EINVAL;
1474
Patrick McHardy27a34212008-01-23 20:35:39 -08001475 err = nla_parse_nested(tb, TCA_CBQ_MAX, opt, cbq_policy);
Patrick McHardycee63722008-01-23 20:33:32 -08001476 if (err < 0)
1477 return err;
1478
Florian Westphaldd47c1f2016-06-09 00:27:40 +02001479 if (tb[TCA_CBQ_OVL_STRATEGY] || tb[TCA_CBQ_POLICE])
Florian Westphalc3498d32016-06-09 00:27:39 +02001480 return -EOPNOTSUPP;
1481
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 if (cl) {
1483 /* Check parent */
1484 if (parentid) {
Patrick McHardyd77fea22008-07-05 23:22:05 -07001485 if (cl->tparent &&
1486 cl->tparent->common.classid != parentid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 return -EINVAL;
1488 if (!cl->tparent && parentid != TC_H_ROOT)
1489 return -EINVAL;
1490 }
1491
Patrick McHardy1e904742008-01-22 22:11:17 -08001492 if (tb[TCA_CBQ_RATE]) {
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001493 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]),
1494 tb[TCA_CBQ_RTAB]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001495 if (rtab == NULL)
1496 return -EINVAL;
1497 }
1498
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001499 if (tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001500 err = gen_replace_estimator(&cl->bstats, NULL,
1501 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001502 NULL,
1503 qdisc_root_sleeping_running(sch),
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001504 tca[TCA_RATE]);
1505 if (err) {
Yang Yingliang79c11f22013-12-17 15:29:17 +08001506 qdisc_put_rtab(rtab);
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001507 return err;
1508 }
1509 }
1510
Linus Torvalds1da177e2005-04-16 15:20:36 -07001511 /* Change class parameters */
1512 sch_tree_lock(sch);
1513
1514 if (cl->next_alive != NULL)
1515 cbq_deactivate_class(cl);
1516
1517 if (rtab) {
Patrick McHardyb94c8af2008-11-20 04:11:36 -08001518 qdisc_put_rtab(cl->R_tab);
1519 cl->R_tab = rtab;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 }
1521
Patrick McHardy1e904742008-01-22 22:11:17 -08001522 if (tb[TCA_CBQ_LSSOPT])
1523 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524
Patrick McHardy1e904742008-01-22 22:11:17 -08001525 if (tb[TCA_CBQ_WRROPT]) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 cbq_rmprio(q, cl);
Patrick McHardy1e904742008-01-22 22:11:17 -08001527 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001528 }
1529
Patrick McHardy1e904742008-01-22 22:11:17 -08001530 if (tb[TCA_CBQ_FOPT])
1531 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
1533 if (cl->q->q.qlen)
1534 cbq_activate_class(cl);
1535
1536 sch_tree_unlock(sch);
1537
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 return 0;
1539 }
1540
1541 if (parentid == TC_H_ROOT)
1542 return -EINVAL;
1543
Patrick McHardy1e904742008-01-22 22:11:17 -08001544 if (tb[TCA_CBQ_WRROPT] == NULL || tb[TCA_CBQ_RATE] == NULL ||
1545 tb[TCA_CBQ_LSSOPT] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 return -EINVAL;
1547
Patrick McHardy1e904742008-01-22 22:11:17 -08001548 rtab = qdisc_get_rtab(nla_data(tb[TCA_CBQ_RATE]), tb[TCA_CBQ_RTAB]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 if (rtab == NULL)
1550 return -EINVAL;
1551
1552 if (classid) {
1553 err = -EINVAL;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001554 if (TC_H_MAJ(classid ^ sch->handle) ||
1555 cbq_class_lookup(q, classid))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556 goto failure;
1557 } else {
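		/* No classid supplied: allocate an unused minor in the
		 * 0x8000+ range reserved for auto-generated handles.
		 */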
1558 int i;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001559 classid = TC_H_MAKE(sch->handle, 0x8000);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001561 for (i = 0; i < 0x8000; i++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 if (++q->hgenerator >= 0x8000)
1563 q->hgenerator = 1;
1564 if (cbq_class_lookup(q, classid|q->hgenerator) == NULL)
1565 break;
1566 }
1567 err = -ENOSR;
1568 if (i >= 0x8000)
1569 goto failure;
1570 classid = classid|q->hgenerator;
1571 }
1572
1573 parent = &q->link;
1574 if (parentid) {
1575 parent = cbq_class_lookup(q, parentid);
1576 err = -EINVAL;
1577 if (parent == NULL)
1578 goto failure;
1579 }
1580
1581 err = -ENOBUFS;
Panagiotis Issaris0da974f2006-07-21 14:51:30 -07001582 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 if (cl == NULL)
1584 goto failure;
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001585
1586 if (tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001587 err = gen_new_estimator(&cl->bstats, NULL, &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001588 NULL,
1589 qdisc_root_sleeping_running(sch),
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001590 tca[TCA_RATE]);
1591 if (err) {
1592 kfree(cl);
1593 goto failure;
1594 }
1595 }
1596
Linus Torvalds1da177e2005-04-16 15:20:36 -07001597 cl->R_tab = rtab;
1598 rtab = NULL;
1599 cl->refcnt = 1;
Changli Gao3511c912010-10-16 13:04:08 +00001600 cl->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid);
1601 if (!cl->q)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 cl->q = &noop_qdisc;
Patrick McHardyd77fea22008-07-05 23:22:05 -07001603 cl->common.classid = classid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 cl->tparent = parent;
1605 cl->qdisc = sch;
1606 cl->allot = parent->allot;
1607 cl->quantum = cl->allot;
1608 cl->weight = cl->R_tab->rate.rate;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001609
1610 sch_tree_lock(sch);
1611 cbq_link_class(cl);
1612 cl->borrow = cl->tparent;
1613 if (cl->tparent != &q->link)
1614 cl->share = cl->tparent;
1615 cbq_adjust_levels(parent);
1616 cl->minidle = -0x7FFFFFFF;
Patrick McHardy1e904742008-01-22 22:11:17 -08001617 cbq_set_lss(cl, nla_data(tb[TCA_CBQ_LSSOPT]));
1618 cbq_set_wrr(cl, nla_data(tb[TCA_CBQ_WRROPT]));
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001619 if (cl->ewma_log == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620 cl->ewma_log = q->link.ewma_log;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001621 if (cl->maxidle == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 cl->maxidle = q->link.maxidle;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001623 if (cl->avpkt == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001624 cl->avpkt = q->link.avpkt;
Patrick McHardy1e904742008-01-22 22:11:17 -08001625 if (tb[TCA_CBQ_FOPT])
1626 cbq_set_fopt(cl, nla_data(tb[TCA_CBQ_FOPT]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001627 sch_tree_unlock(sch);
1628
Patrick McHardyd77fea22008-07-05 23:22:05 -07001629 qdisc_class_hash_grow(sch, &q->clhash);
1630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 *arg = (unsigned long)cl;
1632 return 0;
1633
1634failure:
1635 qdisc_put_rtab(rtab);
1636 return err;
1637}
1638
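/* cops->delete: remove one class. Classes with attached filters or
 * children (and the built-in link class) are busy and cannot be
 * deleted; otherwise the class queue is reset and its queued packets
 * and backlog are subtracted from the ancestors before unlinking.
 */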
1639static int cbq_delete(struct Qdisc *sch, unsigned long arg)
1640{
1641 struct cbq_sched_data *q = qdisc_priv(sch);
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001642 struct cbq_class *cl = (struct cbq_class *)arg;
WANG Cong2ccccf52016-02-25 14:55:01 -08001643 unsigned int qlen, backlog;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001644
1645 if (cl->filters || cl->children || cl == &q->link)
1646 return -EBUSY;
1647
1648 sch_tree_lock(sch);
1649
Jarek Poplawskia37ef2e2006-12-08 00:25:55 -08001650 qlen = cl->q->q.qlen;
WANG Cong2ccccf52016-02-25 14:55:01 -08001651 backlog = cl->q->qstats.backlog;
Jarek Poplawskia37ef2e2006-12-08 00:25:55 -08001652 qdisc_reset(cl->q);
WANG Cong2ccccf52016-02-25 14:55:01 -08001653 qdisc_tree_reduce_backlog(cl->q, qlen, backlog);
Jarek Poplawskia37ef2e2006-12-08 00:25:55 -08001654
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 if (cl->next_alive)
1656 cbq_deactivate_class(cl);
1657
1658 if (q->tx_borrowed == cl)
1659 q->tx_borrowed = q->tx_class;
1660 if (q->tx_class == cl) {
1661 q->tx_class = NULL;
1662 q->tx_borrowed = NULL;
1663 }
Patrick McHardyc3bc7cf2007-07-15 00:03:05 -07001664#ifdef CONFIG_NET_CLS_ACT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001665 if (q->rx_class == cl)
1666 q->rx_class = NULL;
1667#endif
1668
1669 cbq_unlink_class(cl);
1670 cbq_adjust_levels(cl->tparent);
1671 cl->defmap = 0;
1672 cbq_sync_defmap(cl);
1673
1674 cbq_rmprio(q, cl);
1675 sch_tree_unlock(sch);
1676
Jarek Poplawski7cd0a632009-03-15 20:00:19 -07001677 BUG_ON(--cl->refcnt == 0);
1678 /*
1679 * This should never trigger: tc_ctl_tclass still holds the reference
1680 * taken via cops->get(); the class is finally destroyed from cops->put().
1681 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682
1683 return 0;
1684}
1685
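/* cops->tcf_chain: return the filter list anchor of a class; a NULL
 * class argument selects the root (link) class.
 */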
John Fastabend25d8c0d2014-09-12 20:05:27 -07001686static struct tcf_proto __rcu **cbq_find_tcf(struct Qdisc *sch,
1687 unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001688{
1689 struct cbq_sched_data *q = qdisc_priv(sch);
1690 struct cbq_class *cl = (struct cbq_class *)arg;
1691
1692 if (cl == NULL)
1693 cl = &q->link;
1694
1695 return &cl->filter_list;
1696}
1697
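/* cops->bind_tcf: account a filter bound to a class. A filter attached
 * at class @p may only classify into classes strictly below @p in the
 * hierarchy, hence the level comparison.
 */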
1698static unsigned long cbq_bind_filter(struct Qdisc *sch, unsigned long parent,
1699 u32 classid)
1700{
1701 struct cbq_sched_data *q = qdisc_priv(sch);
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001702 struct cbq_class *p = (struct cbq_class *)parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 struct cbq_class *cl = cbq_class_lookup(q, classid);
1704
1705 if (cl) {
1706 if (p && p->level <= cl->level)
1707 return 0;
1708 cl->filters++;
1709 return (unsigned long)cl;
1710 }
1711 return 0;
1712}
1713
1714static void cbq_unbind_filter(struct Qdisc *sch, unsigned long arg)
1715{
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001716 struct cbq_class *cl = (struct cbq_class *)arg;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
1718 cl->filters--;
1719}
1720
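/* cops->walk: iterate over all classes in the hash, honouring the
 * walker's skip/count/stop protocol.
 */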
1721static void cbq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1722{
1723 struct cbq_sched_data *q = qdisc_priv(sch);
Patrick McHardyd77fea22008-07-05 23:22:05 -07001724 struct cbq_class *cl;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001725 unsigned int h;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726
1727 if (arg->stop)
1728 return;
1729
Patrick McHardyd77fea22008-07-05 23:22:05 -07001730 for (h = 0; h < q->clhash.hashsize; h++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08001731 hlist_for_each_entry(cl, &q->clhash.hash[h], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 if (arg->count < arg->skip) {
1733 arg->count++;
1734 continue;
1735 }
1736 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1737 arg->stop = 1;
1738 return;
1739 }
1740 arg->count++;
1741 }
1742 }
1743}
1744
Eric Dumazet20fea082007-11-14 01:44:41 -08001745static const struct Qdisc_class_ops cbq_class_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746 .graft = cbq_graft,
1747 .leaf = cbq_leaf,
Jarek Poplawskia37ef2e2006-12-08 00:25:55 -08001748 .qlen_notify = cbq_qlen_notify,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 .get = cbq_get,
1750 .put = cbq_put,
1751 .change = cbq_change_class,
1752 .delete = cbq_delete,
1753 .walk = cbq_walk,
1754 .tcf_chain = cbq_find_tcf,
1755 .bind_tcf = cbq_bind_filter,
1756 .unbind_tcf = cbq_unbind_filter,
1757 .dump = cbq_dump_class,
1758 .dump_stats = cbq_dump_class_stats,
1759};
1760
Eric Dumazet20fea082007-11-14 01:44:41 -08001761static struct Qdisc_ops cbq_qdisc_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 .next = NULL,
1763 .cl_ops = &cbq_class_ops,
1764 .id = "cbq",
1765 .priv_size = sizeof(struct cbq_sched_data),
1766 .enqueue = cbq_enqueue,
1767 .dequeue = cbq_dequeue,
Jarek Poplawski77be1552008-10-31 00:47:01 -07001768 .peek = qdisc_peek_dequeued,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 .init = cbq_init,
1770 .reset = cbq_reset,
1771 .destroy = cbq_destroy,
1772 .change = NULL,
1773 .dump = cbq_dump,
1774 .dump_stats = cbq_dump_stats,
1775 .owner = THIS_MODULE,
1776};
1777
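/*
 * Illustrative userspace configuration (not part of this file; a sketch
 * only, the device name and parameter values are assumptions):
 *
 *   tc qdisc add dev eth0 root handle 1: cbq bandwidth 100Mbit avpkt 1000
 *   tc class add dev eth0 parent 1: classid 1:1 cbq bandwidth 100Mbit \
 *           rate 10Mbit allot 1514 prio 5 avpkt 1000 bounded
 *
 * The first command installs the CBQ root (the "link" class); the
 * second creates a bounded child class limited to 10Mbit.
 */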
1778static int __init cbq_module_init(void)
1779{
1780 return register_qdisc(&cbq_qdisc_ops);
1781}
YOSHIFUJI Hideaki10297b92007-02-09 23:25:16 +09001782static void __exit cbq_module_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783{
1784 unregister_qdisc(&cbq_qdisc_ops);
1785}
1786module_init(cbq_module_init)
1787module_exit(cbq_module_exit)
1788MODULE_LICENSE("GPL");