/*
 * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:     Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *              Stef Coene <stef.coene@docum.org>
 *                      HTB support at LARTC mailing list
 *              Ondrej Kraus, <krauso@barr.cz>
 *                      found missing INIT_QDISC(htb)
 *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *                      helped a lot to locate nasty class stall bug
 *              Andi Kleen, Jamal Hadi, Bert Hubert
 *                      code review and helpful comments on shaping
 *              Tomasz Wrona, <tw@eter.tym.pl>
 *                      created test case so that I was able to fix nasty bug
 *              Wilfried Weissmann
 *                      spotted bug in dequeue code and helped with fix
 *              Jiri Fojtasek
 *                      fixed requeue routine
 *              and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. A leaf ALWAYS has level 0, and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
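/* For illustration (an assumed configuration, not part of this file):
 * a hierarchy like the one described above is typically built from user
 * space with tc; the device name and rates here are arbitrary examples:
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 20
 *   tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 60mbit ceil 100mbit prio 0
 *   tc class add dev eth0 parent 1:1 classid 1:20 htb rate 40mbit ceil 100mbit prio 1
 *
 * 1:10 and 1:20 are leaf classes (level 0) that borrow from their parent
 * 1:1 when under their ceil; unclassified traffic falls into the default
 * class 1:20.
 */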

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011         /* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

/* used internally to keep the status of a single class */
enum htb_cmode {
        HTB_CANT_SEND,          /* class can't send and can't borrow */
        HTB_MAY_BORROW,         /* class can't send but may borrow */
        HTB_CAN_SEND            /* class can send */
};

struct htb_prio {
        union {
                struct rb_root  row;
                struct rb_root  feed;
        };
        struct rb_node  *ptr;
        /* When class changes from state 1->2 and disconnects from
         * parent's feed then we lose the ptr value and start from the
         * first child again. Here we store the classid of the
         * last valid ptr (used when ptr is NULL).
         */
        u32             last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
        struct Qdisc_class_common common;
        struct psched_ratecfg   rate;
        struct psched_ratecfg   ceil;
        s64                     buffer, cbuffer;/* token bucket depth/rate */
        s64                     mbuffer;        /* max wait time */
        u32                     prio;           /* these two are used only by leaves... */
        int                     quantum;        /* but stored for parent-to-leaf return */

        struct tcf_proto __rcu  *filter_list;   /* class attached filters */
        struct tcf_block        *block;
        int                     filter_cnt;

        int                     level;          /* our level (see above) */
        unsigned int            children;
        struct htb_class        *parent;        /* parent class */

        struct net_rate_estimator __rcu *rate_est;

        /*
         * Written often fields
         */
        struct gnet_stats_basic_packed bstats;
        struct tc_htb_xstats    xstats; /* our special stats */

        /* token bucket parameters */
        s64                     tokens, ctokens;/* current number of tokens */
        s64                     t_c;            /* checkpoint time */

        union {
                struct htb_class_leaf {
                        struct list_head drop_list;
                        int             deficit[TC_HTB_MAXDEPTH];
                        struct Qdisc    *q;
                } leaf;
                struct htb_class_inner {
                        struct htb_prio clprio[TC_HTB_NUMPRIO];
                } inner;
        } un;
        s64                     pq_key;

        int                     prio_activity;  /* for which prios are we active */
        enum htb_cmode          cmode;          /* current mode of the class */
        struct rb_node          pq_node;        /* node for event queue */
        struct rb_node          node[TC_HTB_NUMPRIO];   /* node for self or feed tree */

        unsigned int drops ____cacheline_aligned_in_smp;
        unsigned int            overlimits;
};

struct htb_level {
        struct rb_root  wait_pq;
        struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
        struct Qdisc_class_hash clhash;
        int                     defcls;         /* class where unclassified flows go to */
        int                     rate2quantum;   /* quant = rate / rate2quantum */

        /* filters for qdisc itself */
        struct tcf_proto __rcu  *filter_list;
        struct tcf_block        *block;

#define HTB_WARN_TOOMANYEVENTS  0x1
        unsigned int            warned; /* only one warning */
        int                     direct_qlen;
        struct work_struct      work;

        /* non shaped skbs; let them go directly thru */
        struct qdisc_skb_head   direct_queue;
        long                    direct_pkts;

        struct qdisc_watchdog   watchdog;

        s64                     now;    /* cached dequeue time */
        struct list_head        drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */

        /* time of nearest event per level (row) */
        s64                     near_ev_cache[TC_HTB_MAXDEPTH];

        int                     row_mask[TC_HTB_MAXDEPTH];

        struct htb_level        hlevel[TC_HTB_MAXDEPTH];
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct Qdisc_class_common *clc;

        clc = qdisc_class_find(&q->clhash, handle);
        if (clc == NULL)
                return NULL;
        return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
        return (unsigned long)htb_find(handle, sch);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * then finish and return direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
                                      int *qerr)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        struct tcf_result res;
        struct tcf_proto *tcf;
        int result;

        /* allow to select class by setting skb->priority to valid classid;
         * note that nfmark can be used too by attaching filter fw with no
         * rules in it
         */
        if (skb->priority == sch->handle)
                return HTB_DIRECT;      /* X:0 (direct flow) selected */
        cl = htb_find(skb->priority, sch);
        if (cl) {
                if (cl->level == 0)
                        return cl;
                /* Start with inner filter chain if a non-leaf class is selected */
                tcf = rcu_dereference_bh(cl->filter_list);
        } else {
                tcf = rcu_dereference_bh(q->filter_list);
        }

        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
        while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
                case TC_ACT_STOLEN:
                case TC_ACT_TRAP:
                        *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
                        /* fall through */
                case TC_ACT_SHOT:
                        return NULL;
                }
#endif
                cl = (void *)res.class;
                if (!cl) {
                        if (res.classid == sch->handle)
                                return HTB_DIRECT;      /* X:0 (direct flow) */
                        cl = htb_find(res.classid, sch);
                        if (!cl)
                                break;  /* filter selected invalid classid */
                }
                if (!cl->level)
                        return cl;      /* we hit leaf; return it */

                /* we have got inner class; apply inner filter chain */
                tcf = rcu_dereference_bh(cl->filter_list);
        }
        /* classification failed; try to use default class */
        cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
        if (!cl || cl->level)
                return HTB_DIRECT;      /* bad default .. this is safe bet */
        return cl;
}
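/* For illustration (an assumed setup, not part of this file): with a
 * hierarchy rooted at handle 1:, a u32 filter such as
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 u32 \
 *       match ip dport 80 0xffff flowid 1:10
 *
 * makes htb_classify() resolve matching packets to leaf 1:10, unmatched
 * packets fall back to the MAJOR:default class, and packets whose
 * skb->priority equals the qdisc handle (1:0) bypass shaping entirely
 * via the direct queue.
 */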

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
                               struct htb_class *cl, int prio)
{
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, node[prio]);

                if (cl->common.classid > c->common.classid)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->node[prio], parent, p);
        rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key microseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
                                 struct htb_class *cl, s64 delay)
{
        struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

        cl->pq_key = q->now + delay;
        if (cl->pq_key == q->now)
                cl->pq_key++;

        /* update the nearest event cache */
        if (q->near_ev_cache[cl->level] > cl->pq_key)
                q->near_ev_cache[cl->level] = cl->pq_key;

        while (*p) {
                struct htb_class *c;
                parent = *p;
                c = rb_entry(parent, struct htb_class, pq_node);
                if (cl->pq_key >= c->pq_key)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        rb_link_node(&cl->pq_node, parent, p);
        rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
        *n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
                                        struct htb_class *cl, int mask)
{
        q->row_mask[cl->level] |= mask;
        while (mask) {
                int prio = ffz(~mask);
                mask &= ~(1 << prio);
                htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
        }
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
        if (RB_EMPTY_NODE(rb)) {
                WARN_ON(1);
        } else {
                rb_erase(rb, root);
                RB_CLEAR_NODE(rb);
        }
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
                                             struct htb_class *cl, int mask)
{
        int m = 0;
        struct htb_level *hlevel = &q->hlevel[cl->level];

        while (mask) {
                int prio = ffz(~mask);
                struct htb_prio *hprio = &hlevel->hprio[prio];

                mask &= ~(1 << prio);
                if (hprio->ptr == cl->node + prio)
                        htb_next_rb_node(&hprio->ptr);

                htb_safe_rb_erase(cl->node + prio, &hprio->row);
                if (!hprio->row.rb_node)
                        m |= 1 << prio;
        }
        q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.clprio[prio].feed.rb_node)
                                /* parent already has its feed in use, so
                                 * reset the bit in mask as parent is already ok
                                 */
                                mask &= ~(1 << prio);

                        htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
                }
                p->prio_activity |= mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
        struct htb_class *p = cl->parent;
        long m, mask = cl->prio_activity;

        while (cl->cmode == HTB_MAY_BORROW && p && mask) {
                m = mask;
                mask = 0;
                while (m) {
                        int prio = ffz(~m);
                        m &= ~(1 << prio);

                        if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
                                /* we are removing child which is pointed to from
                                 * parent feed - forget the pointer but remember
                                 * classid
                                 */
                                p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
                                p->un.inner.clprio[prio].ptr = NULL;
                        }

                        htb_safe_rb_erase(cl->node + prio,
                                          &p->un.inner.clprio[prio].feed);

                        if (!p->un.inner.clprio[prio].feed.rb_node)
                                mask |= 1 << prio;
                }

                p->prio_activity &= ~mask;
                cl = p;
                p = cl->parent;

        }
        if (cl->cmode == HTB_CAN_SEND && mask)
                htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
        else
                return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
        if (htb_hysteresis)
                return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
        else
                return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
        s64 toks;

        if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
                *diff = -toks;
                return HTB_CANT_SEND;
        }

        if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
                return HTB_CAN_SEND;

        *diff = -toks;
        return HTB_MAY_BORROW;
}
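/* A worked illustration of the decision above (values are made up):
 * with htb_hysteresis disabled both watermarks are 0, so a class with
 * ctokens + *diff = -5000 ns is HTB_CANT_SEND, one with
 * tokens + *diff = +2000 ns is HTB_CAN_SEND, and one that is over its
 * rate but under its ceil (tokens < 0, ctokens >= 0) is HTB_MAY_BORROW.
 * With hysteresis enabled the relevant watermark drops to -cl->cbuffer
 * resp. -cl->buffer, so a class keeps its current mode across small
 * oscillations around zero instead of flapping on every packet.
 */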

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
        enum htb_cmode new_mode = htb_class_mode(cl, diff);

        if (new_mode == cl->cmode)
                return;

        if (new_mode == HTB_CANT_SEND)
                cl->overlimits++;

        if (cl->prio_activity) {        /* not necessary: speed optimization */
                if (cl->cmode != HTB_CANT_SEND)
                        htb_deactivate_prios(q, cl);
                cl->cmode = new_mode;
                if (new_mode != HTB_CANT_SEND)
                        htb_activate_prios(q, cl);
        } else
                cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
        WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);

        if (!cl->prio_activity) {
                cl->prio_activity = 1 << cl->prio;
                htb_activate_prios(q, cl);
                list_add_tail(&cl->un.leaf.drop_list,
                              q->drops + cl->prio);
        }
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
        WARN_ON(!cl->prio_activity);

        htb_deactivate_prios(q, cl);
        cl->prio_activity = 0;
        list_del_init(&cl->un.leaf.drop_list);
}

static void htb_enqueue_tail(struct sk_buff *skb, struct Qdisc *sch,
                             struct qdisc_skb_head *qh)
{
        struct sk_buff *last = qh->tail;

        if (last) {
                skb->next = NULL;
                last->next = skb;
                qh->tail = skb;
        } else {
                qh->tail = skb;
                qh->head = skb;
        }
        qh->qlen++;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
                       struct sk_buff **to_free)
{
        int uninitialized_var(ret);
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl = htb_classify(skb, sch, &ret);

        if (cl == HTB_DIRECT) {
                /* enqueue to helper queue */
                if (q->direct_queue.qlen < q->direct_qlen) {
                        htb_enqueue_tail(skb, sch, &q->direct_queue);
                        q->direct_pkts++;
                } else {
                        return qdisc_drop(skb, sch, to_free);
                }
#ifdef CONFIG_NET_CLS_ACT
        } else if (!cl) {
                if (ret & __NET_XMIT_BYPASS)
                        qdisc_qstats_drop(sch);
                __qdisc_drop(skb, to_free);
                return ret;
#endif
        } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q,
                                        to_free)) != NET_XMIT_SUCCESS) {
                if (net_xmit_drop_count(ret)) {
                        qdisc_qstats_drop(sch);
                        cl->drops++;
                }
                return ret;
        } else {
                htb_activate(q, cl);
        }

        qdisc_qstats_backlog_inc(sch, skb);
        sch->q.qlen++;
        return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
        s64 toks = diff + cl->tokens;

        if (toks > cl->buffer)
                toks = cl->buffer;
        toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;

        cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
        s64 toks = diff + cl->ctokens;

        if (toks > cl->cbuffer)
                toks = cl->cbuffer;
        toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
        if (toks <= -cl->mbuffer)
                toks = 1 - cl->mbuffer;

        cl->ctokens = toks;
}

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock here than in the
 * event queue. In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
                             int level, struct sk_buff *skb)
{
        int bytes = qdisc_pkt_len(skb);
        enum htb_cmode old_mode;
        s64 diff;

        while (cl) {
                diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
                if (cl->level >= level) {
                        if (cl->level == level)
                                cl->xstats.lends++;
                        htb_accnt_tokens(cl, bytes, diff);
                } else {
                        cl->xstats.borrows++;
                        cl->tokens += diff;     /* we moved t_c; update tokens */
                }
                htb_accnt_ctokens(cl, bytes, diff);
                cl->t_c = q->now;

                old_mode = cl->cmode;
                diff = 0;
                htb_change_class_mode(q, cl, &diff);
                if (old_mode != cl->cmode) {
                        if (old_mode != HTB_CAN_SEND)
                                htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
                        if (cl->cmode != HTB_CAN_SEND)
                                htb_add_to_wait_tree(q, cl, diff);
                }

                /* update basic stats except for leaves which are already updated */
                if (cl->level)
                        bstats_update(&cl->bstats, skb);

                cl = cl->parent;
        }
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
                         unsigned long start)
{
        /* don't run for longer than 2 jiffies; 2 is used instead of
         * 1 to simplify things when jiffy is going to be incremented
         * too soon
         */
        unsigned long stop_at = start + 2;
        struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

        while (time_before(jiffies, stop_at)) {
                struct htb_class *cl;
                s64 diff;
                struct rb_node *p = rb_first(wait_pq);

                if (!p)
                        return 0;

                cl = rb_entry(p, struct htb_class, pq_node);
                if (cl->pq_key > q->now)
                        return cl->pq_key;

                htb_safe_rb_erase(p, wait_pq);
                diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
                htb_change_class_mode(q, cl, &diff);
                if (cl->cmode != HTB_CAN_SEND)
                        htb_add_to_wait_tree(q, cl, diff);
        }

        /* too much load - let's continue after a break for scheduling */
        if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
                pr_warn("htb: too many events!\n");
                q->warned |= HTB_WARN_TOOMANYEVENTS;
        }

        return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * is returned if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
                                              u32 id)
{
        struct rb_node *r = NULL;
        while (n) {
                struct htb_class *cl =
                    rb_entry(n, struct htb_class, node[prio]);

                if (id > cl->common.classid) {
                        n = n->rb_right;
                } else if (id < cl->common.classid) {
                        r = n;
                        n = n->rb_left;
                } else {
                        return n;
                }
        }
        return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf that the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
        int i;
        struct {
                struct rb_node *root;
                struct rb_node **pptr;
                u32 *pid;
        } stk[TC_HTB_MAXDEPTH], *sp = stk;

        BUG_ON(!hprio->row.rb_node);
        sp->root = hprio->row.rb_node;
        sp->pptr = &hprio->ptr;
        sp->pid = &hprio->last_ptr_id;

        for (i = 0; i < 65535; i++) {
                if (!*sp->pptr && *sp->pid) {
                        /* ptr was invalidated but id is valid - try to recover
                         * the original or next ptr
                         */
                        *sp->pptr =
                            htb_id_find_next_upper(prio, sp->root, *sp->pid);
                }
                *sp->pid = 0;   /* ptr is valid now so remove this hint as it
                                 * can become out of date quickly
                                 */
                if (!*sp->pptr) {       /* we are at right end; rewind & go up */
                        *sp->pptr = sp->root;
                        while ((*sp->pptr)->rb_left)
                                *sp->pptr = (*sp->pptr)->rb_left;
                        if (sp > stk) {
                                sp--;
                                if (!*sp->pptr) {
                                        WARN_ON(1);
                                        return NULL;
                                }
                                htb_next_rb_node(sp->pptr);
                        }
                } else {
                        struct htb_class *cl;
                        struct htb_prio *clp;

                        cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
                        if (!cl->level)
                                return cl;
                        clp = &cl->un.inner.clprio[prio];
                        (++sp)->root = clp->feed.rb_node;
                        sp->pptr = &clp->ptr;
                        sp->pid = &clp->last_ptr_id;
                }
        }
        WARN_ON(1);
        return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
                                        const int level)
{
        struct sk_buff *skb = NULL;
        struct htb_class *cl, *start;
        struct htb_level *hlevel = &q->hlevel[level];
        struct htb_prio *hprio = &hlevel->hprio[prio];

        /* look initial class up in the row */
        start = cl = htb_lookup_leaf(hprio, prio);

        do {
next:
                if (unlikely(!cl))
                        return NULL;

                /* class can be empty - it is unlikely but can be true if leaf
                 * qdisc drops packets in enqueue routine or if someone used
                 * graft operation on the leaf since last dequeue;
                 * simply deactivate and skip such class
                 */
                if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
                        struct htb_class *next;
                        htb_deactivate(q, cl);

                        /* row/level might become empty */
                        if ((q->row_mask[level] & (1 << prio)) == 0)
                                return NULL;

                        next = htb_lookup_leaf(hprio, prio);

                        if (cl == start)        /* fix start if we just deleted it */
                                start = next;
                        cl = next;
                        goto next;
                }

                skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
                if (likely(skb != NULL))
                        break;

                qdisc_warn_nonwc("htb", cl->un.leaf.q);
                htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
                                         &q->hlevel[0].hprio[prio].ptr);
                cl = htb_lookup_leaf(hprio, prio);

        } while (cl != start);

        if (likely(skb != NULL)) {
                bstats_update(&cl->bstats, skb);
                cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
                if (cl->un.leaf.deficit[level] < 0) {
                        cl->un.leaf.deficit[level] += cl->quantum;
                        htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
                                                 &q->hlevel[0].hprio[prio].ptr);
                }
                /* this used to be after charge_class but this constellation
                 * gives us slightly better performance
                 */
                if (!cl->un.leaf.q->q.qlen)
                        htb_deactivate(q, cl);
                htb_charge_class(q, cl, level, skb);
        }
        return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
        struct sk_buff *skb;
        struct htb_sched *q = qdisc_priv(sch);
        int level;
        s64 next_event;
        unsigned long start_at;

        /* try to dequeue direct packets as high prio (!) to minimize cpu work */
        skb = __qdisc_dequeue_head(&q->direct_queue);
        if (skb != NULL) {
ok:
                qdisc_bstats_update(sch, skb);
                qdisc_qstats_backlog_dec(sch, skb);
                sch->q.qlen--;
                return skb;
        }

        if (!sch->q.qlen)
                goto fin;
        q->now = ktime_get_ns();
        start_at = jiffies;

        next_event = q->now + 5LLU * NSEC_PER_SEC;

        for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
                /* common case optimization - skip event handler quickly */
                int m;
                s64 event = q->near_ev_cache[level];

                if (q->now >= event) {
                        event = htb_do_events(q, level, start_at);
                        if (!event)
                                event = q->now + NSEC_PER_SEC;
                        q->near_ev_cache[level] = event;
                }

                if (next_event > event)
                        next_event = event;

                m = ~q->row_mask[level];
                while (m != (int)(-1)) {
                        int prio = ffz(m);

                        m |= 1 << prio;
                        skb = htb_dequeue_tree(q, prio, level);
                        if (likely(skb != NULL))
                                goto ok;
                }
        }
        qdisc_qstats_overlimit(sch);
        if (likely(next_event > q->now))
                qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
        else
                schedule_work(&q->work);
fin:
        return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
        struct htb_sched *q = qdisc_priv(sch);
        struct htb_class *cl;
        unsigned int i;

        for (i = 0; i < q->clhash.hashsize; i++) {
                hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
                        if (cl->level)
                                memset(&cl->un.inner, 0, sizeof(cl->un.inner));
                        else {
                                if (cl->un.leaf.q)
                                        qdisc_reset(cl->un.leaf.q);
                                INIT_LIST_HEAD(&cl->un.leaf.drop_list);
                        }
                        cl->prio_activity = 0;
                        cl->cmode = HTB_CAN_SEND;
                }
        }
        qdisc_watchdog_cancel(&q->watchdog);
        __qdisc_reset_queue(&q->direct_queue);
        sch->q.qlen = 0;
        sch->qstats.backlog = 0;
        memset(q->hlevel, 0, sizeof(q->hlevel));
        memset(q->row_mask, 0, sizeof(q->row_mask));
        for (i = 0; i < TC_HTB_NUMPRIO; i++)
                INIT_LIST_HEAD(q->drops + i);
}

Patrick McHardy27a34212008-01-23 20:35:39 -08001000static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
1001 [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
1002 [TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
1003 [TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1004 [TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001005 [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001006 [TCA_HTB_RATE64] = { .type = NLA_U64 },
1007 [TCA_HTB_CEIL64] = { .type = NLA_U64 },
Patrick McHardy27a34212008-01-23 20:35:39 -08001008};
1009
Jarek Poplawski12247362009-02-01 01:13:22 -08001010static void htb_work_func(struct work_struct *work)
1011{
1012 struct htb_sched *q = container_of(work, struct htb_sched, work);
1013 struct Qdisc *sch = q->watchdog.qdisc;
1014
Florian Westphal0ee13622016-06-14 06:16:27 +02001015 rcu_read_lock();
Jarek Poplawski12247362009-02-01 01:13:22 -08001016 __netif_schedule(qdisc_root(sch));
Florian Westphal0ee13622016-06-14 06:16:27 +02001017 rcu_read_unlock();
Jarek Poplawski12247362009-02-01 01:13:22 -08001018}
1019
static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	int err;
	int i;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch);
	if (err)
		return err;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		return err;
	for (i = 0; i < TC_HTB_NUMPRIO; i++)
		INIT_LIST_HEAD(q->drops + i);

	qdisc_skb_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	return 0;
}

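/* Dump the scheduler-global options (TCA_HTB_INIT and the direct queue
 * length) back to userspace in netlink format.
 */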
static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *nest;
	struct tc_htb_glob gopt;

	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
	 * no change can happen to the qdisc parameters.
	 */

	gopt.direct_pkts = q->direct_pkts;
	gopt.version = HTB_VER;
	gopt.rate2quantum = q->rate2quantum;
	gopt.defcls = q->defcls;
	gopt.debug = 0;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
	    nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

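/* Dump one class: parent/handle/leaf qdisc in the tcmsg header, then the
 * tc_htb_opt parameters. Rates that do not fit the legacy 32-bit fields are
 * exported additionally via the TCA_HTB_RATE64/TCA_HTB_CEIL64 attributes.
 */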
static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct nlattr *nest;
	struct tc_htb_opt opt;

	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
	 * no change can happen to the class parameters.
	 */
	tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
	tcm->tcm_handle = cl->common.classid;
	if (!cl->level && cl->un.leaf.q)
		tcm->tcm_info = cl->un.leaf.q->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	memset(&opt, 0, sizeof(opt));

	psched_ratecfg_getrate(&opt.rate, &cl->rate);
	opt.buffer = PSCHED_NS2TICKS(cl->buffer);
	psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
	opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
	opt.quantum = cl->quantum;
	opt.prio = cl->prio;
	opt.level = cl->level;
	if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
		goto nla_put_failure;
	if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;
	if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
	    nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
			      TCA_HTB_PAD))
		goto nla_put_failure;

	return nla_nest_end(skb, nest);

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

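/* Dump per-class statistics. Token counters are kept in nanoseconds
 * internally, so they are converted to ticks and clamped to the 32-bit
 * range of the xstats fields.
 */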
static int
htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
{
	struct htb_class *cl = (struct htb_class *)arg;
	struct gnet_stats_queue qs = {
		.drops = cl->drops,
		.overlimits = cl->overlimits,
	};
	__u32 qlen = 0;

	if (!cl->level && cl->un.leaf.q) {
		qlen = cl->un.leaf.q->q.qlen;
		qs.backlog = cl->un.leaf.q->qstats.backlog;
	}
	cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
				    INT_MIN, INT_MAX);
	cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
				     INT_MIN, INT_MAX);

	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
				  d, NULL, &cl->bstats) < 0 ||
	    gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
	    gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
		return -1;

	return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
}

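/* Replace the elementary qdisc of a leaf class. Inner classes carry no
 * queue of their own, so grafting onto them is rejected; a NULL child is
 * replaced by a default pfifo.
 */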
static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl->level)
		return -EINVAL;
	if (new == NULL &&
	    (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
				     cl->common.classid)) == NULL)
		return -ENOBUFS;

	*old = qdisc_replace(sch, new, &cl->un.leaf.q);
	return 0;
}

static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;
	return !cl->level ? cl->un.leaf.q : NULL;
}

static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	htb_deactivate(qdisc_priv(sch), cl);
}

static inline int htb_parent_last_child(struct htb_class *cl)
{
	if (!cl->parent)
		/* the root class */
		return 0;
	if (cl->parent->children > 1)
		/* not the last child */
		return 0;
	return 1;
}

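/* Turn a parent back into a leaf once its last child is removed: take it
 * off the wait queue if needed, attach the new elementary qdisc, and reset
 * its tokens so it restarts in HTB_CAN_SEND mode.
 */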
static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
			       struct Qdisc *new_q)
{
	struct htb_class *parent = cl->parent;

	WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);

	if (parent->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&parent->pq_node,
				  &q->hlevel[parent->level].wait_pq);

	parent->level = 0;
	memset(&parent->un.inner, 0, sizeof(parent->un.inner));
	INIT_LIST_HEAD(&parent->un.leaf.drop_list);
	parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
	parent->tokens = parent->buffer;
	parent->ctokens = parent->cbuffer;
	parent->t_c = ktime_get_ns();
	parent->cmode = HTB_CAN_SEND;
}

static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
{
	if (!cl->level) {
		WARN_ON(!cl->un.leaf.q);
		qdisc_destroy(cl->un.leaf.q);
	}
	gen_kill_estimator(&cl->rate_est);
	tcf_block_put(cl->block);
	kfree(cl);
}

static void htb_destroy(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct hlist_node *next;
	struct htb_class *cl;
	unsigned int i;

	cancel_work_sync(&q->work);
	qdisc_watchdog_cancel(&q->watchdog);
	/* This line used to be after the htb_destroy_class call below
	 * and surprisingly it worked in 2.4. But it must precede it
	 * because filters need their target class alive to be able to
	 * call unbind_filter on it (without an Oops).
	 */
	tcf_block_put(q->block);

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			tcf_block_put(cl->block);
			cl->block = NULL;
		}
	}
	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
					  common.hnode)
			htb_destroy_class(sch, cl);
	}
	qdisc_class_hash_destroy(&q->clhash);
	__qdisc_reset_queue(&q->direct_queue);
}

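/* Delete one class. Only childless classes with no filter references may
 * be removed; if the class was the parent's last child, the parent is
 * converted back into a leaf with a fresh pfifo.
 */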
static int htb_delete(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;
	struct Qdisc *new_q = NULL;
	int last_child = 0;

	/* TODO: why not allow deleting a subtree? References? Does the
	 * tc subsystem guarantee that htb_destroy is entered with no
	 * class refs held, so that we can remove children safely there?
	 */
	if (cl->children || cl->filter_cnt)
		return -EBUSY;

	if (!cl->level && htb_parent_last_child(cl)) {
		new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					  cl->parent->common.classid);
		last_child = 1;
	}

	sch_tree_lock(sch);

	if (!cl->level) {
		unsigned int qlen = cl->un.leaf.q->q.qlen;
		unsigned int backlog = cl->un.leaf.q->qstats.backlog;

		qdisc_reset(cl->un.leaf.q);
		qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
	}

	/* delete from hash and active; remainder in destroy_class */
	qdisc_class_hash_remove(&q->clhash, &cl->common);
	if (cl->parent)
		cl->parent->children--;

	if (cl->prio_activity)
		htb_deactivate(q, cl);

	if (cl->cmode != HTB_CAN_SEND)
		htb_safe_rb_erase(&cl->pq_node,
				  &q->hlevel[cl->level].wait_pq);

	if (last_child)
		htb_parent_to_leaf(q, cl, new_q);

	sch_tree_unlock(sch);

	htb_destroy_class(sch, cl);
	return 0;
}

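/* Create a new class or change an existing one. This is the backend of,
 * e.g. (illustrative command line only; device, handles and rates are
 * arbitrary):
 *
 *   tc class add dev eth0 parent 1: classid 1:10 \
 *      htb rate 1mbit ceil 2mbit burst 15k prio 1
 *
 * A new class turns its parent (if it was a leaf) into an inner node and
 * gets a default pfifo as its own elementary qdisc.
 */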
static int htb_change_class(struct Qdisc *sch, u32 classid,
			    u32 parentid, struct nlattr **tca,
			    unsigned long *arg)
{
	int err = -EINVAL;
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)*arg, *parent;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_opt *hopt;
	u64 rate64, ceil64;

	/* extract all subattrs from the opt attr */
	if (!opt)
		goto failure;

	err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy, NULL);
	if (err < 0)
		goto failure;

	err = -EINVAL;
	if (tb[TCA_HTB_PARMS] == NULL)
		goto failure;

	parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);

	hopt = nla_data(tb[TCA_HTB_PARMS]);
	if (!hopt->rate.rate || !hopt->ceil.rate)
		goto failure;

	/* Keep backward compatibility with rate_table based iproute2 tc */
	if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));

	if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
		qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));

	if (!cl) {		/* new class */
		struct Qdisc *new_q;
		int prio;
		struct {
			struct nlattr nla;
			struct gnet_estimator opt;
		} est = {
			.nla = {
				.nla_len = nla_attr_size(sizeof(est.opt)),
				.nla_type = TCA_RATE,
			},
			.opt = {
				/* 4s interval, 16s averaging constant */
				.interval = 2,
				.ewma_log = 2,
			},
		};

		/* check for valid classid */
		if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
		    htb_find(classid, sch))
			goto failure;

		/* check maximal depth */
		if (parent && parent->parent && parent->parent->level < 2) {
			pr_err("htb: tree is too deep\n");
			goto failure;
		}
		err = -ENOBUFS;
		cl = kzalloc(sizeof(*cl), GFP_KERNEL);
		if (!cl)
			goto failure;

		err = tcf_block_get(&cl->block, &cl->filter_list, sch);
		if (err) {
			kfree(cl);
			goto failure;
		}
		if (htb_rate_est || tca[TCA_RATE]) {
			err = gen_new_estimator(&cl->bstats, NULL,
						&cl->rate_est,
						NULL,
						qdisc_root_sleeping_running(sch),
						tca[TCA_RATE] ? : &est.nla);
			if (err) {
				tcf_block_put(cl->block);
				kfree(cl);
				goto failure;
			}
		}

		cl->children = 0;
		INIT_LIST_HEAD(&cl->un.leaf.drop_list);
		RB_CLEAR_NODE(&cl->pq_node);

		for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
			RB_CLEAR_NODE(&cl->node[prio]);

		/* create the leaf qdisc early because it uses
		 * kmalloc(GFP_KERNEL), so it can't be created inside of
		 * sch_tree_lock -- thanks to Karlis Peisenieks
		 */
		new_q = qdisc_create_dflt(sch->dev_queue,
					  &pfifo_qdisc_ops, classid);
		sch_tree_lock(sch);
		if (parent && !parent->level) {
			unsigned int qlen = parent->un.leaf.q->q.qlen;
			unsigned int backlog = parent->un.leaf.q->qstats.backlog;

			/* turn parent into inner node */
			qdisc_reset(parent->un.leaf.q);
			qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
			qdisc_destroy(parent->un.leaf.q);
			if (parent->prio_activity)
				htb_deactivate(q, parent);

			/* remove from evt list because of level change */
			if (parent->cmode != HTB_CAN_SEND) {
				htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
				parent->cmode = HTB_CAN_SEND;
			}
			parent->level = (parent->parent ? parent->parent->level
					 : TC_HTB_MAXDEPTH) - 1;
			memset(&parent->un.inner, 0, sizeof(parent->un.inner));
		}
		/* leaf (we) needs elementary qdisc */
		cl->un.leaf.q = new_q ? new_q : &noop_qdisc;

		cl->common.classid = classid;
		cl->parent = parent;

		/* set class to be in HTB_CAN_SEND state */
		cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
		cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
		cl->mbuffer = 60ULL * NSEC_PER_SEC;	/* 1min */
		cl->t_c = ktime_get_ns();
		cl->cmode = HTB_CAN_SEND;

		/* attach to the hash list and parent's family */
		qdisc_class_hash_insert(&q->clhash, &cl->common);
		if (parent)
			parent->children++;
		if (cl->un.leaf.q != &noop_qdisc)
			qdisc_hash_add(cl->un.leaf.q, true);
	} else {
		if (tca[TCA_RATE]) {
			err = gen_replace_estimator(&cl->bstats, NULL,
						    &cl->rate_est,
						    NULL,
						    qdisc_root_sleeping_running(sch),
						    tca[TCA_RATE]);
			if (err)
				return err;
		}
		sch_tree_lock(sch);
	}

	rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;

	ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;

	psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
	psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);

	/* There used to be a nasty bug here: we have to check that the
	 * node is really a leaf before changing cl->un.leaf!
	 */
	if (!cl->level) {
		u64 quantum = cl->rate.rate_bytes_ps;

		do_div(quantum, q->rate2quantum);
		cl->quantum = min_t(u64, quantum, INT_MAX);

		if (!hopt->quantum && cl->quantum < 1000) {
			pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 1000;
		}
		if (!hopt->quantum && cl->quantum > 200000) {
			pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
				cl->common.classid);
			cl->quantum = 200000;
		}
		if (hopt->quantum)
			cl->quantum = hopt->quantum;
		if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
			cl->prio = TC_HTB_NUMPRIO - 1;
	}

	cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
	cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);

	sch_tree_unlock(sch);

	qdisc_class_hash_grow(sch, &q->clhash);

	*arg = (unsigned long)cl;
	return 0;

failure:
	return err;
}

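/* Return the filter block of a class (or of the qdisc itself for the
 * root), used by the tc filter code when attaching classifiers.
 */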
static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = (struct htb_class *)arg;

	return cl ? cl->block : q->block;
}

static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
				     u32 classid)
{
	struct htb_class *cl = htb_find(classid, sch);

	/*if (cl && !cl->level) return 0;
	 * The line above used to be there to prevent attaching filters to
	 * leaves. But at least the tc_index filter uses this just to get
	 * the class for other reasons, so we have to allow it.
	 * ----
	 * 19.6.2002 As Werner explained, it is ok - bind filter is just
	 * another way to "lock" the class - unlike "get" this lock can
	 * be broken by the class during destroy, IIUC.
	 */
	if (cl)
		cl->filter_cnt++;
	return (unsigned long)cl;
}

static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
{
	struct htb_class *cl = (struct htb_class *)arg;

	if (cl)
		cl->filter_cnt--;
}

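/* Iterate over all classes in the hash for the dump/walk API, honouring
 * the caller's skip count and stop flag.
 */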
static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}

static const struct Qdisc_class_ops htb_class_ops = {
	.graft = htb_graft,
	.leaf = htb_leaf,
	.qlen_notify = htb_qlen_notify,
	.find = htb_search,
	.change = htb_change_class,
	.delete = htb_delete,
	.walk = htb_walk,
	.tcf_block = htb_tcf_block,
	.bind_tcf = htb_bind_filter,
	.unbind_tcf = htb_unbind_filter,
	.dump = htb_dump_class,
	.dump_stats = htb_dump_class_stats,
};

static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
	.cl_ops = &htb_class_ops,
	.id = "htb",
	.priv_size = sizeof(struct htb_sched),
	.enqueue = htb_enqueue,
	.dequeue = htb_dequeue,
	.peek = qdisc_peek_dequeued,
	.init = htb_init,
	.reset = htb_reset,
	.destroy = htb_destroy,
	.dump = htb_dump,
	.owner = THIS_MODULE,
};

static int __init htb_module_init(void)
{
	return register_qdisc(&htb_qdisc_ops);
}
static void __exit htb_module_exit(void)
{
	unregister_qdisc(&htb_qdisc_ops);
}

module_init(htb_module_init)
module_exit(htb_module_exit)
MODULE_LICENSE("GPL");