// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *		HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *		found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *		helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *		code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *		created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *		spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *		fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
 Author: devik@cdi.cz
 ========================================================================
 HTB is like TBF with multiple classes. It is also similar to CBQ because
 it allows assigning a priority to each class in the hierarchy.
 In fact it is another implementation of Floyd's formal sharing.

 Levels:
 Each class is assigned a level. A leaf ALWAYS has level 0 and root
 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
 one less than their parent.
*/
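
/* Illustrative sketch of the level rule above (an editorial example with
 * hypothetical classids, not part of the original notes): in a hierarchy
 * such as
 *
 *	1:1  root  (level TC_HTB_MAXDEPTH-1)
 *	+-- 1:10  inner (level TC_HTB_MAXDEPTH-2)
 *	|    +-- 1:100  leaf (level 0)
 *	+-- 1:11  leaf  (level 0)
 *
 * a leaf is level 0 regardless of its depth, while each interior node
 * sits one level below its parent.
 */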

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

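/* Usage sketch (assumption: HTB built as a module named sch_htb): both
 * parameters are declared with mode 0640, so following the generic
 * module_param() convention they can be set at load time, e.g.
 * "modprobe sch_htb htb_hysteresis=1", or toggled afterwards through
 * /sys/module/sch_htb/parameters/htb_hysteresis.
 */
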
/* used internally to keep status of single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};
89
Eric Dumazetca4ec902013-06-13 07:58:30 -070090/* interior & leaf nodes; props specific to leaves are marked L:
91 * To reduce false sharing, place mostly read fields at beginning,
92 * and mostly written ones at the end.
93 */
Stephen Hemminger87990462006-08-10 23:35:16 -070094struct htb_class {
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -070095 struct Qdisc_class_common common;
Eric Dumazetca4ec902013-06-13 07:58:30 -070096 struct psched_ratecfg rate;
97 struct psched_ratecfg ceil;
98 s64 buffer, cbuffer;/* token bucket depth/rate */
99 s64 mbuffer; /* max wait time */
stephen hemmingercbd37552013-08-01 22:32:07 -0700100 u32 prio; /* these two are used only by leaves... */
Eric Dumazetca4ec902013-06-13 07:58:30 -0700101 int quantum; /* but stored for parent-to-leaf return */
102
John Fastabend25d8c0d2014-09-12 20:05:27 -0700103 struct tcf_proto __rcu *filter_list; /* class attached filters */
Jiri Pirko6529eab2017-05-17 11:07:55 +0200104 struct tcf_block *block;
Eric Dumazetca4ec902013-06-13 07:58:30 -0700105 int filter_cnt;
Eric Dumazetca4ec902013-06-13 07:58:30 -0700106
107 int level; /* our level (see above) */
108 unsigned int children;
109 struct htb_class *parent; /* parent class */
110
Eric Dumazet1c0d32f2016-12-04 09:48:16 -0800111 struct net_rate_estimator __rcu *rate_est;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700112
Eric Dumazetca4ec902013-06-13 07:58:30 -0700113 /*
114 * Written often fields
115 */
116 struct gnet_stats_basic_packed bstats;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +0200117 struct gnet_stats_basic_packed bstats_bias;
Eric Dumazetca4ec902013-06-13 07:58:30 -0700118 struct tc_htb_xstats xstats; /* our special stats */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700119
Eric Dumazetca4ec902013-06-13 07:58:30 -0700120 /* token bucket parameters */
121 s64 tokens, ctokens;/* current number of tokens */
122 s64 t_c; /* checkpoint time */
Jarek Poplawskic19f7a32008-12-03 21:09:45 -0800123
Stephen Hemminger87990462006-08-10 23:35:16 -0700124 union {
125 struct htb_class_leaf {
Eric Dumazetc9364632013-06-15 03:30:10 -0700126 int deficit[TC_HTB_MAXDEPTH];
127 struct Qdisc *q;
Stephen Hemminger87990462006-08-10 23:35:16 -0700128 } leaf;
129 struct htb_class_inner {
Eric Dumazetc9364632013-06-15 03:30:10 -0700130 struct htb_prio clprio[TC_HTB_NUMPRIO];
Stephen Hemminger87990462006-08-10 23:35:16 -0700131 } inner;
Cong Wang11957be2018-09-07 13:29:14 -0700132 };
Eric Dumazetca4ec902013-06-13 07:58:30 -0700133 s64 pq_key;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700134
Eric Dumazetca4ec902013-06-13 07:58:30 -0700135 int prio_activity; /* for which prios are we active */
136 enum htb_cmode cmode; /* current mode of the class */
137 struct rb_node pq_node; /* node for event queue */
138 struct rb_node node[TC_HTB_NUMPRIO]; /* node for self or feed tree */
Eric Dumazet338ed9b2016-06-21 23:16:51 -0700139
140 unsigned int drops ____cacheline_aligned_in_smp;
Eric Dumazet3c75f6e2017-09-18 12:36:22 -0700141 unsigned int overlimits;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700142};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}
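
/* A brief note on handles (editorial sketch relying only on the generic
 * TC_H_* macros from <linux/pkt_sched.h>): a u32 handle packs the major
 * number in the upper 16 bits and the minor in the lower 16, so the
 * classid "1:10" used by tc corresponds to
 *
 *	u32 h = TC_H_MAKE(1 << 16, 0x10);	/\* 0x00010010 *\/
 *
 * and htb_find(h, sch) looks that class up in q->clhash.
 */
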
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases a leaf class is
 * returned. We allow direct class selection by classid in priority. Then
 * we examine filters in qdisc and in inner nodes (if higher filter points
 * to the inner node). If we end up with classid MAJOR:0 we enqueue the
 * skb into special internal fifo (direct). These packets then go directly
 * thru. If we still have no valid leaf we try to use MAJOR:default leaf.
 * If that is still unsuccessful then we finish and return the direct
 * queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default; this is a safe bet */
	return cl;
}
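
/* Userspace-side sketch of the direct selection above (an illustration,
 * not kernel code): an application may steer traffic to a specific class
 * by writing the raw classid into the socket priority, here class 1:10
 * under an HTB qdisc with handle 1: (priority values above 6 require
 * CAP_NET_ADMIN):
 *
 *	int prio = 0x00010010;	/\* classid 1:10 *\/
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &prio, sizeof(prio));
 *
 * skb->priority then matches in htb_find() before any filter runs.
 */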

/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key (in nanoseconds). Make sure that
 * class is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}
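
/* Worked example of the mask walk above (illustrative numbers): for
 * mask == 0x5 (prios 0 and 2 set), ffz(~mask) yields 0, the bit is
 * cleared leaving 0x4, the next pass yields 2, and the loop ends with
 * mask == 0 - each set bit is visited exactly once, lowest prio first.
 */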

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 * @q: the priority event queue
 * @cl: the class to remove
 * @mask: the given priorities in class in bitmap
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 * @q: the priority event queue
 * @cl: the class to activate
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 * @q: the priority event queue
 * @cl: the class to deactivate
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

/**
 * htb_class_mode - computes and returns current class mode
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
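
/* Worked example (illustrative numbers, htb_hysteresis == 0): with
 * cl->ctokens == 20000, cl->tokens == -5000 and *diff == 2000 ns of newly
 * accrued time, ctokens + diff == 22000 >= 0 rules out HTB_CANT_SEND,
 * tokens + diff == -3000 < 0 rules out HTB_CAN_SEND, so the result is
 * HTB_MAY_BORROW with *diff set to 3000 - the shortfall that callers
 * translate into a wakeup delay via htb_add_to_wait_tree().
 */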

/**
 * htb_change_class_mode - changes class's mode
 * @q: the priority event queue
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Make sure that leaf is active. In other words it can't be called
 * with non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

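/* Worked example for the two helpers above (illustrative numbers):
 * psched_l2t_ns() converts a byte count into transmit time at the
 * configured rate, so at rate 1 Mbit/s a 1500 byte packet costs
 * 1500 * 8 / 1e6 s == 12,000,000 ns worth of tokens. With cl->tokens == 0
 * and diff == 5,000,000 ns accrued since the checkpoint, the sum is first
 * capped at cl->buffer, the 12,000,000 ns cost is subtracted (giving
 * -7,000,000) and the result is floored at 1 - cl->mbuffer.
 */
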
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 * @q: the priority event queue
 * @cl: the class to start iterating from
 * @level: the minimum level to account
 * @skb: the socket buffer
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use more precise clock than event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 * @q: the priority event queue
 * @level: which wait_pq in 'q->hlevel'
 * @start: start jiffies
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events which have cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where the class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;

	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}
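
/* Example of the successor search above (illustrative classids): in a
 * prio tree holding {0x10, 0x20, 0x30} with 0x20 at the root, a lookup
 * with id == 0x15 remembers 0x20 in r, walks left to 0x10, falls off its
 * right child and returns the node of 0x20 - the smallest classid >= id.
 */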

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is an active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}
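
/* Worked DRR example for the deficit handling above (illustrative
 * numbers): with cl->quantum == 1500 and deficit[level] == 1500, a
 * 1000 byte skb leaves 500 and the class keeps the feed pointer; the
 * next 1000 byte skb drives the deficit to -500, the quantum is added
 * back (deficit becomes 1000) and htb_next_rb_node() moves on to the
 * sibling, yielding byte-accurate round robin among equal-prio classes.
 */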

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001035static void htb_set_lockdep_class_child(struct Qdisc *q)
1036{
1037 static struct lock_class_key child_key;
1038
1039 lockdep_set_class(qdisc_lock(q), &child_key);
1040}
1041
1042static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
1043{
1044 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
1045}
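/* Sketch for orientation, with hypothetical foo_* names: a driver that
 * offers HTB offload dispatches TC_SETUP_QDISC_HTB from its ndo_setup_tc,
 * e.g.
 *
 *	static int foo_setup_tc(struct net_device *dev,
 *				enum tc_setup_type type, void *type_data)
 *	{
 *		if (type == TC_SETUP_QDISC_HTB)
 *			return foo_setup_tc_htb(dev, type_data);
 *		return -EOPNOTSUPP;
 *	}
 *
 * and switches on the command field of struct tc_htb_qopt_offload.
 */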
1046
Alexander Aringe63d7df2017-12-20 12:35:13 -05001047static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1048 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001049{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001050 struct net_device *dev = qdisc_dev(sch);
1051 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001053 struct nlattr *tb[TCA_HTB_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 struct tc_htb_glob *gopt;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001055 unsigned int ntx;
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001056 bool offload;
Patrick McHardycee63722008-01-23 20:33:32 -08001057 int err;
Patrick McHardycee63722008-01-23 20:33:32 -08001058
Nikolay Aleksandrov88c2ace2017-08-30 12:48:57 +03001059 qdisc_watchdog_init(&q->watchdog, sch);
1060 INIT_WORK(&q->work, htb_work_func);
1061
Patrick McHardycee63722008-01-23 20:33:32 -08001062 if (!opt)
1063 return -EINVAL;
1064
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001065 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001066 if (err)
1067 return err;
1068
Johannes Berg8cb08172019-04-26 14:07:28 +02001069 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1070 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001071 if (err < 0)
1072 return err;
1073
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001074 if (!tb[TCA_HTB_INIT])
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 return -EINVAL;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001076
Patrick McHardy1e904742008-01-22 22:11:17 -08001077 gopt = nla_data(tb[TCA_HTB_INIT]);
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001078 if (gopt->version != HTB_VER >> 16)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001080
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001081 offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001082
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001083 if (offload) {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001084 if (sch->parent != TC_H_ROOT)
1085 return -EOPNOTSUPP;
1086
1087 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
1088 return -EOPNOTSUPP;
1089
1090 q->num_direct_qdiscs = dev->real_num_tx_queues;
1091 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
1092 sizeof(*q->direct_qdiscs),
1093 GFP_KERNEL);
1094 if (!q->direct_qdiscs)
1095 return -ENOMEM;
1096 }
1097
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001098 err = qdisc_class_hash_init(&q->clhash);
1099 if (err < 0)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001100 goto err_free_direct_qdiscs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101
Florian Westphal48da34b2016-09-18 00:57:34 +02001102 qdisc_skb_head_init(&q->direct_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001104 if (tb[TCA_HTB_DIRECT_QLEN])
1105 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
Phil Sutter348e3432015-08-18 10:30:49 +02001106 else
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001107 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
Phil Sutter348e3432015-08-18 10:30:49 +02001108
Linus Torvalds1da177e2005-04-16 15:20:36 -07001109 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1110 q->rate2quantum = 1;
1111 q->defcls = gopt->defcls;
1112
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001113 if (!offload)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001114 return 0;
1115
1116 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1117 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1118 struct Qdisc *qdisc;
1119
1120 qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1121 TC_H_MAKE(sch->handle, 0), extack);
1122 if (!qdisc) {
1123 err = -ENOMEM;
1124 goto err_free_qdiscs;
1125 }
1126
1127 htb_set_lockdep_class_child(qdisc);
1128 q->direct_qdiscs[ntx] = qdisc;
1129 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1130 }
1131
1132 sch->flags |= TCQ_F_MQROOT;
1133
1134 offload_opt = (struct tc_htb_qopt_offload) {
1135 .command = TC_HTB_CREATE,
1136 .parent_classid = TC_H_MAJ(sch->handle) >> 16,
1137 .classid = TC_H_MIN(q->defcls),
1138 .extack = extack,
1139 };
1140 err = htb_offload(dev, &offload_opt);
1141 if (err)
1142 goto err_free_qdiscs;
1143
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001144 /* Defer this assignment, so that htb_destroy skips offload-related
1145 * parts (especially calling ndo_setup_tc) on errors.
1146 */
1147 q->offload = true;
1148
Linus Torvalds1da177e2005-04-16 15:20:36 -07001149 return 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001150
1151err_free_qdiscs:
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001152 for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
1153 ntx++)
1154 qdisc_put(q->direct_qdiscs[ntx]);
1155
1156 qdisc_class_hash_destroy(&q->clhash);
1157 /* Prevent use-after-free and double-free when htb_destroy gets called.
1158 */
1159 q->clhash.hash = NULL;
1160 q->clhash.hashsize = 0;
1161
1162err_free_direct_qdiscs:
1163 kfree(q->direct_qdiscs);
1164 q->direct_qdiscs = NULL;
1165 return err;
1166}
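/* Summary of the two init paths above: in software mode htb_init only
 * parses options and sets up the class hash and direct queue. In offload
 * mode it also allocates one pfifo per real TX queue, issues TC_HTB_CREATE
 * to the driver, and sets TCQ_F_MQROOT, so the qdisc is attached roughly
 * the way mq would be, with classification happening in hardware.
 */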
1167
1168static void htb_attach_offload(struct Qdisc *sch)
1169{
1170 struct net_device *dev = qdisc_dev(sch);
1171 struct htb_sched *q = qdisc_priv(sch);
1172 unsigned int ntx;
1173
1174 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1175 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1176
1177 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1178 qdisc_put(old);
1179 qdisc_hash_add(qdisc, false);
1180 }
1181 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1182 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1183 struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1184
1185 qdisc_put(old);
1186 }
1187
1188 kfree(q->direct_qdiscs);
1189 q->direct_qdiscs = NULL;
1190}
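/* Once attached, the per-queue pfifos created in htb_init are owned by
 * their TX queues, so q->direct_qdiscs is only needed between init and
 * attach and is freed here; queues beyond num_direct_qdiscs are left
 * without a grafted qdisc on purpose.
 */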
1191
1192static void htb_attach_software(struct Qdisc *sch)
1193{
1194 struct net_device *dev = qdisc_dev(sch);
1195 unsigned int ntx;
1196
 1197 /* Mirror qdisc_graft behavior. */
1198 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1199 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1200 struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1201
1202 qdisc_refcount_inc(sch);
1203
1204 qdisc_put(old);
1205 }
1206}
1207
1208static void htb_attach(struct Qdisc *sch)
1209{
1210 struct htb_sched *q = qdisc_priv(sch);
1211
1212 if (q->offload)
1213 htb_attach_offload(sch);
1214 else
1215 htb_attach_software(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216}
1217
1218static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1219{
1220 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001221 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222 struct tc_htb_glob gopt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001224 if (q->offload)
1225 sch->flags |= TCQ_F_OFFLOADED;
1226 else
1227 sch->flags &= ~TCQ_F_OFFLOADED;
1228
Cong Wangb3624872019-05-04 11:43:42 -07001229 sch->qstats.overlimits = q->overlimits;
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001230 /* It's safe not to acquire the qdisc lock. As we hold RTNL,
1231 * no change can happen on the qdisc parameters.
1232 */
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001233
1234 gopt.direct_pkts = q->direct_pkts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235 gopt.version = HTB_VER;
1236 gopt.rate2quantum = q->rate2quantum;
1237 gopt.defcls = q->defcls;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001238 gopt.debug = 0;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001239
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001240 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001241 if (nest == NULL)
1242 goto nla_put_failure;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001243 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1244 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
David S. Miller1b34ec42012-03-29 05:11:39 -04001245 goto nla_put_failure;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001246 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1247 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001248
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001249 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001250
Patrick McHardy1e904742008-01-22 22:11:17 -08001251nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001252 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001253 return -1;
1254}
1255
1256static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
Stephen Hemminger87990462006-08-10 23:35:16 -07001257 struct sk_buff *skb, struct tcmsg *tcm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001258{
Stephen Hemminger87990462006-08-10 23:35:16 -07001259 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001260 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001261 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 struct tc_htb_opt opt;
1263
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001264 /* It's safe not to acquire the qdisc lock. As we hold RTNL,
1265 * no change can happen on the class parameters.
1266 */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001267 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1268 tcm->tcm_handle = cl->common.classid;
Cong Wang11957be2018-09-07 13:29:14 -07001269 if (!cl->level && cl->leaf.q)
1270 tcm->tcm_info = cl->leaf.q->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001271
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001272 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001273 if (nest == NULL)
1274 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275
Stephen Hemminger87990462006-08-10 23:35:16 -07001276 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001278 psched_ratecfg_getrate(&opt.rate, &cl->rate);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001279 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001280 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001281 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001282 opt.quantum = cl->quantum;
1283 opt.prio = cl->prio;
Stephen Hemminger87990462006-08-10 23:35:16 -07001284 opt.level = cl->level;
David S. Miller1b34ec42012-03-29 05:11:39 -04001285 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1286 goto nla_put_failure;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001287 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1288 goto nla_put_failure;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001289 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001290 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1291 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001292 goto nla_put_failure;
1293 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001294 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1295 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001296 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001297
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001298 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001299
Patrick McHardy1e904742008-01-22 22:11:17 -08001300nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001301 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 return -1;
1303}
1304
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001305static void htb_offload_aggregate_stats(struct htb_sched *q,
1306 struct htb_class *cl)
1307{
1308 struct htb_class *c;
1309 unsigned int i;
1310
1311 memset(&cl->bstats, 0, sizeof(cl->bstats));
1312
1313 for (i = 0; i < q->clhash.hashsize; i++) {
1314 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1315 struct htb_class *p = c;
1316
1317 while (p && p->level < cl->level)
1318 p = p->parent;
1319
1320 if (p != cl)
1321 continue;
1322
1323 cl->bstats.bytes += c->bstats_bias.bytes;
1324 cl->bstats.packets += c->bstats_bias.packets;
1325 if (c->level == 0) {
1326 cl->bstats.bytes += c->leaf.q->bstats.bytes;
1327 cl->bstats.packets += c->leaf.q->bstats.packets;
1328 }
1329 }
1330 }
1331}
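/* In short: inner classes have no hardware counters of their own, so we
 * walk every class, keep those whose ancestor chain passes through cl,
 * and sum their bstats_bias (bytes/packets inherited from previously
 * destroyed leaf qdiscs) plus, for live leaves, the attached qdisc's
 * bstats.
 */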
1332
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333static int
Stephen Hemminger87990462006-08-10 23:35:16 -07001334htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335{
Stephen Hemminger87990462006-08-10 23:35:16 -07001336 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001337 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001338 struct gnet_stats_queue qs = {
1339 .drops = cl->drops,
Eric Dumazet3c75f6e2017-09-18 12:36:22 -07001340 .overlimits = cl->overlimits,
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001341 };
John Fastabend64015852014-09-28 11:53:57 -07001342 __u32 qlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343
Paolo Abeni5dd431b2019-03-28 16:53:12 +01001344 if (!cl->level && cl->leaf.q)
1345 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1346
Konstantin Khlebnikov0564bf02016-07-16 17:08:56 +03001347 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1348 INT_MIN, INT_MAX);
1349 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1350 INT_MIN, INT_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001351
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001352 if (q->offload) {
1353 if (!cl->level) {
1354 if (cl->leaf.q)
1355 cl->bstats = cl->leaf.q->bstats;
1356 else
1357 memset(&cl->bstats, 0, sizeof(cl->bstats));
1358 cl->bstats.bytes += cl->bstats_bias.bytes;
1359 cl->bstats.packets += cl->bstats_bias.packets;
1360 } else {
1361 htb_offload_aggregate_stats(q, cl);
1362 }
1363 }
1364
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001365 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1366 d, NULL, &cl->bstats) < 0 ||
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001367 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001368 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369 return -1;
1370
1371 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1372}
1373
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001374static struct netdev_queue *
1375htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1376{
1377 struct net_device *dev = qdisc_dev(sch);
1378 struct tc_htb_qopt_offload offload_opt;
Maxim Mikityanskiy93bde212021-03-11 16:42:05 +02001379 struct htb_sched *q = qdisc_priv(sch);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001380 int err;
1381
Maxim Mikityanskiy93bde212021-03-11 16:42:05 +02001382 if (!q->offload)
1383 return sch->dev_queue;
1384
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001385 offload_opt = (struct tc_htb_qopt_offload) {
1386 .command = TC_HTB_LEAF_QUERY_QUEUE,
1387 .classid = TC_H_MIN(tcm->tcm_parent),
1388 };
1389 err = htb_offload(dev, &offload_opt);
1390 if (err || offload_opt.qid >= dev->num_tx_queues)
1391 return NULL;
1392 return netdev_get_tx_queue(dev, offload_opt.qid);
1393}
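/* Contract sketch (offload case): when userspace grafts a child, e.g.
 *	tc qdisc add dev eth0 parent 1:10 fq_codel
 * the core calls ->select_queue() so the new qdisc lands on the TX queue
 * the driver assigned to class 1:10 via TC_HTB_LEAF_QUERY_QUEUE; in
 * software mode we simply stay on sch->dev_queue.
 */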
1394
1395static struct Qdisc *
1396htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1397{
1398 struct net_device *dev = dev_queue->dev;
1399 struct Qdisc *old_q;
1400
1401 if (dev->flags & IFF_UP)
1402 dev_deactivate(dev);
1403 old_q = dev_graft_qdisc(dev_queue, new_q);
1404 if (new_q)
1405 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1406 if (dev->flags & IFF_UP)
1407 dev_activate(dev);
1408
1409 return old_q;
1410}
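/* The deactivate/activate bracket above quiesces the device so no TX
 * softirq can observe dev_queue->qdisc mid-swap; every offload-mode graft
 * below funnels through this helper for that reason.
 */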
1411
1412static void htb_offload_move_qdisc(struct Qdisc *sch, u16 qid_old, u16 qid_new)
1413{
1414 struct netdev_queue *queue_old, *queue_new;
1415 struct net_device *dev = qdisc_dev(sch);
1416 struct Qdisc *qdisc;
1417
1418 queue_old = netdev_get_tx_queue(dev, qid_old);
1419 queue_new = netdev_get_tx_queue(dev, qid_new);
1420
1421 if (dev->flags & IFF_UP)
1422 dev_deactivate(dev);
1423 qdisc = dev_graft_qdisc(queue_old, NULL);
1424 qdisc->dev_queue = queue_new;
1425 qdisc = dev_graft_qdisc(queue_new, qdisc);
1426 if (dev->flags & IFF_UP)
1427 dev_activate(dev);
1428
1429 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1430}
1431
Linus Torvalds1da177e2005-04-16 15:20:36 -07001432static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
Alexander Aring653d6fd2017-12-20 12:35:17 -05001433 struct Qdisc **old, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001435 struct netdev_queue *dev_queue = sch->dev_queue;
Stephen Hemminger87990462006-08-10 23:35:16 -07001436 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001437 struct htb_sched *q = qdisc_priv(sch);
1438 struct Qdisc *old_q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001440 if (cl->level)
1441 return -EINVAL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001442
1443 if (q->offload) {
1444 dev_queue = new->dev_queue;
1445 WARN_ON(dev_queue != cl->leaf.q->dev_queue);
1446 }
1447
1448 if (!new) {
1449 new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1450 cl->common.classid, extack);
1451 if (!new)
1452 return -ENOBUFS;
1453 }
1454
1455 if (q->offload) {
1456 htb_set_lockdep_class_child(new);
1457 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1458 qdisc_refcount_inc(new);
1459 old_q = htb_graft_helper(dev_queue, new);
1460 }
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001461
Cong Wang11957be2018-09-07 13:29:14 -07001462 *old = qdisc_replace(sch, new, &cl->leaf.q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001463
1464 if (q->offload) {
1465 WARN_ON(old_q != *old);
1466 qdisc_put(old_q);
1467 }
1468
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001469 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470}
1471
Stephen Hemminger87990462006-08-10 23:35:16 -07001472static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473{
Stephen Hemminger87990462006-08-10 23:35:16 -07001474 struct htb_class *cl = (struct htb_class *)arg;
Cong Wang11957be2018-09-07 13:29:14 -07001475 return !cl->level ? cl->leaf.q : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476}
1477
Patrick McHardy256d61b2006-11-29 17:37:05 -08001478static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1479{
1480 struct htb_class *cl = (struct htb_class *)arg;
1481
Konstantin Khlebnikov95946652017-08-15 16:39:59 +03001482 htb_deactivate(qdisc_priv(sch), cl);
Patrick McHardy256d61b2006-11-29 17:37:05 -08001483}
1484
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001485static inline int htb_parent_last_child(struct htb_class *cl)
1486{
1487 if (!cl->parent)
1488 /* the root class */
1489 return 0;
Patrick McHardy42077592008-07-05 23:22:53 -07001490 if (cl->parent->children > 1)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001491 /* not the last child */
1492 return 0;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001493 return 1;
1494}
1495
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001496static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001497 struct Qdisc *new_q)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001498{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001499 struct htb_sched *q = qdisc_priv(sch);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001500 struct htb_class *parent = cl->parent;
1501
Cong Wang11957be2018-09-07 13:29:14 -07001502 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001503
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001504 if (parent->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001505 htb_safe_rb_erase(&parent->pq_node,
1506 &q->hlevel[parent->level].wait_pq);
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001507
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001508 parent->level = 0;
Cong Wang11957be2018-09-07 13:29:14 -07001509 memset(&parent->inner, 0, sizeof(parent->inner));
1510 parent->leaf.q = new_q ? new_q : &noop_qdisc;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001511 parent->tokens = parent->buffer;
1512 parent->ctokens = parent->cbuffer;
Eric Dumazetd2de8752014-08-22 18:32:09 -07001513 parent->t_c = ktime_get_ns();
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001514 parent->cmode = HTB_CAN_SEND;
1515}
1516
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001517static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1518 struct netdev_queue *dev_queue,
1519 struct Qdisc *new_q)
1520{
1521 struct Qdisc *old_q;
1522
1523 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1524 qdisc_refcount_inc(new_q);
1525 old_q = htb_graft_helper(dev_queue, new_q);
1526 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1527}
1528
1529static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1530 bool last_child, bool destroying,
1531 struct netlink_ext_ack *extack)
1532{
1533 struct tc_htb_qopt_offload offload_opt;
1534 struct Qdisc *q = cl->leaf.q;
1535 struct Qdisc *old = NULL;
1536 int err;
1537
1538 if (cl->level)
1539 return -EINVAL;
1540
1541 WARN_ON(!q);
1542 if (!destroying) {
1543 /* On destroy of HTB, two cases are possible:
1544 * 1. q is a normal qdisc, but q->dev_queue has noop qdisc.
1545 * 2. q is a noop qdisc (for nodes that were inner),
1546 * q->dev_queue is noop_netdev_queue.
1547 */
1548 old = htb_graft_helper(q->dev_queue, NULL);
1549 WARN_ON(!old);
1550 WARN_ON(old != q);
1551 }
1552
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001553 if (cl->parent) {
1554 cl->parent->bstats_bias.bytes += q->bstats.bytes;
1555 cl->parent->bstats_bias.packets += q->bstats.packets;
1556 }
1557
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001558 offload_opt = (struct tc_htb_qopt_offload) {
1559 .command = !last_child ? TC_HTB_LEAF_DEL :
1560 destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1561 TC_HTB_LEAF_DEL_LAST,
1562 .classid = cl->common.classid,
1563 .extack = extack,
1564 };
1565 err = htb_offload(qdisc_dev(sch), &offload_opt);
1566
1567 if (!err || destroying)
1568 qdisc_put(old);
1569 else
1570 htb_graft_helper(q->dev_queue, old);
1571
1572 if (last_child)
1573 return err;
1574
1575 if (!err && offload_opt.moved_qid != 0) {
1576 if (destroying)
1577 q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch),
1578 offload_opt.qid);
1579 else
1580 htb_offload_move_qdisc(sch, offload_opt.moved_qid,
1581 offload_opt.qid);
1582 }
1583
1584 return err;
1585}
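/* The command selection above, spelled out:
 *	!last_child                -> TC_HTB_LEAF_DEL
 *	last_child && !destroying  -> TC_HTB_LEAF_DEL_LAST
 *	last_child && destroying   -> TC_HTB_LEAF_DEL_LAST_FORCE
 * Only the non-last-child case can report moved_qid, since deleting one
 * of several leaves may let the driver reshuffle queue assignments.
 */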
1586
Stephen Hemminger87990462006-08-10 23:35:16 -07001587static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 if (!cl->level) {
Cong Wang11957be2018-09-07 13:29:14 -07001590 WARN_ON(!cl->leaf.q);
Vlad Buslov86bd4462018-09-24 19:22:50 +03001591 qdisc_put(cl->leaf.q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001592 }
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001593 gen_kill_estimator(&cl->rate_est);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001594 tcf_block_put(cl->block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 kfree(cl);
1596}
1597
Stephen Hemminger87990462006-08-10 23:35:16 -07001598static void htb_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001599{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001600 struct net_device *dev = qdisc_dev(sch);
1601 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001602 struct htb_sched *q = qdisc_priv(sch);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001603 struct hlist_node *next;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001604 bool nonempty, changed;
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001605 struct htb_class *cl;
1606 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607
Jarek Poplawski12247362009-02-01 01:13:22 -08001608 cancel_work_sync(&q->work);
Patrick McHardyfb983d42007-03-16 01:22:39 -07001609 qdisc_watchdog_cancel(&q->watchdog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610 /* This line used to be after htb_destroy_class call below
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001611 * and surprisingly it worked in 2.4. But it must precede it
 1612 * because filters need their target class alive to be able to call
1613 * unbind_filter on it (without Oops).
1614 */
Jiri Pirko6529eab2017-05-17 11:07:55 +02001615 tcf_block_put(q->block);
Stephen Hemminger87990462006-08-10 23:35:16 -07001616
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001617 for (i = 0; i < q->clhash.hashsize; i++) {
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001618 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Jiri Pirko6529eab2017-05-17 11:07:55 +02001619 tcf_block_put(cl->block);
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001620 cl->block = NULL;
1621 }
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001622 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001623
1624 do {
1625 nonempty = false;
1626 changed = false;
1627 for (i = 0; i < q->clhash.hashsize; i++) {
1628 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1629 common.hnode) {
1630 bool last_child;
1631
1632 if (!q->offload) {
1633 htb_destroy_class(sch, cl);
1634 continue;
1635 }
1636
1637 nonempty = true;
1638
1639 if (cl->level)
1640 continue;
1641
1642 changed = true;
1643
1644 last_child = htb_parent_last_child(cl);
1645 htb_destroy_class_offload(sch, cl, last_child,
1646 true, NULL);
1647 qdisc_class_hash_remove(&q->clhash,
1648 &cl->common);
1649 if (cl->parent)
1650 cl->parent->children--;
1651 if (last_child)
1652 htb_parent_to_leaf(sch, cl, NULL);
1653 htb_destroy_class(sch, cl);
1654 }
1655 }
1656 } while (changed);
1657 WARN_ON(nonempty);
1658
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001659 qdisc_class_hash_destroy(&q->clhash);
Eric Dumazeta5a9f5342016-06-13 20:21:56 -07001660 __qdisc_reset_queue(&q->direct_queue);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001661
1662 if (!q->offload)
1663 return;
1664
1665 offload_opt = (struct tc_htb_qopt_offload) {
1666 .command = TC_HTB_DESTROY,
1667 };
1668 htb_offload(dev, &offload_opt);
1669
1670 if (!q->direct_qdiscs)
1671 return;
1672 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1673 qdisc_put(q->direct_qdiscs[i]);
1674 kfree(q->direct_qdiscs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001675}
1676
Maxim Mikityanskiy4dd78a72021-01-19 14:08:12 +02001677static int htb_delete(struct Qdisc *sch, unsigned long arg,
1678 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001679{
1680 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001681 struct htb_class *cl = (struct htb_class *)arg;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001682 struct Qdisc *new_q = NULL;
1683 int last_child = 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001684 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001685
Yang Yinglianga071d272013-12-23 17:38:59 +08001686 /* TODO: why don't we allow deleting a subtree? References? Does
 1687 * the tc subsys guarantee us that in htb_destroy it holds no class
 1688 * refs, so that we can remove children safely there?
1689 */
Patrick McHardy42077592008-07-05 23:22:53 -07001690 if (cl->children || cl->filter_cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001691 return -EBUSY;
Stephen Hemminger87990462006-08-10 23:35:16 -07001692
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001693 if (!cl->level && htb_parent_last_child(cl))
1694 last_child = 1;
1695
1696 if (q->offload) {
1697 err = htb_destroy_class_offload(sch, cl, last_child, false,
1698 extack);
1699 if (err)
1700 return err;
1701 }
1702
1703 if (last_child) {
1704 struct netdev_queue *dev_queue;
1705
1706 dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue;
1707 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001708 cl->parent->common.classid,
1709 NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001710 if (q->offload) {
Yunjian Wangae81feb2021-03-30 22:27:48 +08001711 if (new_q) {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001712 htb_set_lockdep_class_child(new_q);
Yunjian Wangae81feb2021-03-30 22:27:48 +08001713 htb_parent_to_leaf_offload(sch, dev_queue, new_q);
1714 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001715 }
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001716 }
1717
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 sch_tree_lock(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001719
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001720 if (!cl->level)
1721 qdisc_purge_queue(cl->leaf.q);
Patrick McHardy814a175e2006-11-29 17:34:50 -08001722
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001723 /* delete from hash and active; remainder in destroy_class */
1724 qdisc_class_hash_remove(&q->clhash, &cl->common);
Jarek Poplawski26b284d2008-08-13 15:16:43 -07001725 if (cl->parent)
1726 cl->parent->children--;
Patrick McHardyc38c83c2007-03-27 14:04:24 -07001727
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 if (cl->prio_activity)
Stephen Hemminger87990462006-08-10 23:35:16 -07001729 htb_deactivate(q, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001731 if (cl->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001732 htb_safe_rb_erase(&cl->pq_node,
1733 &q->hlevel[cl->level].wait_pq);
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001734
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001735 if (last_child)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001736 htb_parent_to_leaf(sch, cl, new_q);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001737
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 sch_tree_unlock(sch);
WANG Cong143976c2017-08-24 16:51:29 -07001739
1740 htb_destroy_class(sch, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 return 0;
1742}
1743
Stephen Hemminger87990462006-08-10 23:35:16 -07001744static int htb_change_class(struct Qdisc *sch, u32 classid,
Patrick McHardy1e904742008-01-22 22:11:17 -08001745 u32 parentid, struct nlattr **tca,
Alexander Aring793d81d2017-12-20 12:35:15 -05001746 unsigned long *arg, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747{
1748 int err = -EINVAL;
1749 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001750 struct htb_class *cl = (struct htb_class *)*arg, *parent;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001751 struct tc_htb_qopt_offload offload_opt;
Patrick McHardy1e904742008-01-22 22:11:17 -08001752 struct nlattr *opt = tca[TCA_OPTIONS];
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001753 struct nlattr *tb[TCA_HTB_MAX + 1];
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001754 struct Qdisc *parent_qdisc = NULL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001755 struct netdev_queue *dev_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 struct tc_htb_opt *hopt;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001757 u64 rate64, ceil64;
Li RongQingda01ec42018-03-30 10:11:21 +08001758 int warn = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
1760 /* extract all subattrs from opt attr */
Patrick McHardycee63722008-01-23 20:33:32 -08001761 if (!opt)
1762 goto failure;
1763
Johannes Berg8cb08172019-04-26 14:07:28 +02001764 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1765 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001766 if (err < 0)
1767 goto failure;
1768
1769 err = -EINVAL;
Patrick McHardy27a34212008-01-23 20:35:39 -08001770 if (tb[TCA_HTB_PARMS] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772
Stephen Hemminger87990462006-08-10 23:35:16 -07001773 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001774
Patrick McHardy1e904742008-01-22 22:11:17 -08001775 hopt = nla_data(tb[TCA_HTB_PARMS]);
Eric Dumazet196d97f2012-11-05 16:40:49 +00001776 if (!hopt->rate.rate || !hopt->ceil.rate)
Stephen Hemminger87990462006-08-10 23:35:16 -07001777 goto failure;
1778
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001779 /* Keeping backward compatible with rate_table based iproute2 tc */
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001780 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001781 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1782 NULL));
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001783
1784 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001785 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1786 NULL));
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001787
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001788 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1789 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1790
Stephen Hemminger87990462006-08-10 23:35:16 -07001791 if (!cl) { /* new class */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001792 struct net_device *dev = qdisc_dev(sch);
1793 struct Qdisc *new_q, *old_q;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001794 int prio;
Patrick McHardyee39e102007-07-02 22:48:13 -07001795 struct {
Patrick McHardy1e904742008-01-22 22:11:17 -08001796 struct nlattr nla;
Patrick McHardyee39e102007-07-02 22:48:13 -07001797 struct gnet_estimator opt;
1798 } est = {
Patrick McHardy1e904742008-01-22 22:11:17 -08001799 .nla = {
1800 .nla_len = nla_attr_size(sizeof(est.opt)),
1801 .nla_type = TCA_RATE,
Patrick McHardyee39e102007-07-02 22:48:13 -07001802 },
1803 .opt = {
1804 /* 4s interval, 16s averaging constant */
1805 .interval = 2,
1806 .ewma_log = 2,
1807 },
1808 };
Stephen Hemminger3696f622006-08-10 23:36:01 -07001809
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 /* check for valid classid */
Joe Perchesf64f9e72009-11-29 16:55:45 -08001811 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1812 htb_find(classid, sch))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 goto failure;
1814
1815 /* check maximal depth */
1816 if (parent && parent->parent && parent->parent->level < 2) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001817 pr_err("htb: tree is too deep\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 goto failure;
1819 }
1820 err = -ENOBUFS;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001821 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1822 if (!cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001823 goto failure;
Stephen Hemminger87990462006-08-10 23:35:16 -07001824
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001825 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001826 if (err) {
1827 kfree(cl);
1828 goto failure;
1829 }
Eric Dumazet64153ce2013-06-06 14:53:16 -07001830 if (htb_rate_est || tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001831 err = gen_new_estimator(&cl->bstats, NULL,
1832 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001833 NULL,
1834 qdisc_root_sleeping_running(sch),
Eric Dumazet64153ce2013-06-06 14:53:16 -07001835 tca[TCA_RATE] ? : &est.nla);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001836 if (err)
1837 goto err_block_put;
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001838 }
1839
Patrick McHardy42077592008-07-05 23:22:53 -07001840 cl->children = 0;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001841 RB_CLEAR_NODE(&cl->pq_node);
1842
1843 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1844 RB_CLEAR_NODE(&cl->node[prio]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001845
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001846 cl->common.classid = classid;
1847
 1848 /* Make sure nothing interrupts us between two
1849 * ndo_setup_tc calls.
1850 */
1851 ASSERT_RTNL();
1852
Linus Torvalds1da177e2005-04-16 15:20:36 -07001853 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001854 * so it can't be used inside of sch_tree_lock
1855 * -- thanks to Karlis Peisenieks
1856 */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001857 if (!q->offload) {
1858 dev_queue = sch->dev_queue;
1859 } else if (!(parent && !parent->level)) {
1860 /* Assign a dev_queue to this classid. */
1861 offload_opt = (struct tc_htb_qopt_offload) {
1862 .command = TC_HTB_LEAF_ALLOC_QUEUE,
1863 .classid = cl->common.classid,
1864 .parent_classid = parent ?
1865 TC_H_MIN(parent->common.classid) :
1866 TC_HTB_CLASSID_ROOT,
1867 .rate = max_t(u64, hopt->rate.rate, rate64),
1868 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1869 .extack = extack,
1870 };
1871 err = htb_offload(dev, &offload_opt);
1872 if (err) {
1873 pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
1874 err);
1875 goto err_kill_estimator;
1876 }
1877 dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1878 } else { /* First child. */
1879 dev_queue = parent->leaf.q->dev_queue;
1880 old_q = htb_graft_helper(dev_queue, NULL);
1881 WARN_ON(old_q != parent->leaf.q);
1882 offload_opt = (struct tc_htb_qopt_offload) {
1883 .command = TC_HTB_LEAF_TO_INNER,
1884 .classid = cl->common.classid,
1885 .parent_classid =
1886 TC_H_MIN(parent->common.classid),
1887 .rate = max_t(u64, hopt->rate.rate, rate64),
1888 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1889 .extack = extack,
1890 };
1891 err = htb_offload(dev, &offload_opt);
1892 if (err) {
1893 pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
1894 err);
1895 htb_graft_helper(dev_queue, old_q);
1896 goto err_kill_estimator;
1897 }
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001898 parent->bstats_bias.bytes += old_q->bstats.bytes;
1899 parent->bstats_bias.packets += old_q->bstats.packets;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001900 qdisc_put(old_q);
1901 }
1902 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001903 classid, NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001904 if (q->offload) {
1905 if (new_q) {
1906 htb_set_lockdep_class_child(new_q);
1907 /* One ref for cl->leaf.q, the other for
1908 * dev_queue->qdisc.
1909 */
1910 qdisc_refcount_inc(new_q);
1911 }
1912 old_q = htb_graft_helper(dev_queue, new_q);
1913 /* No qdisc_put needed. */
1914 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 sch_tree_lock(sch);
1917 if (parent && !parent->level) {
1918 /* turn parent into inner node */
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001919 qdisc_purge_queue(parent->leaf.q);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001920 parent_qdisc = parent->leaf.q;
Stephen Hemminger87990462006-08-10 23:35:16 -07001921 if (parent->prio_activity)
1922 htb_deactivate(q, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001923
1924 /* remove from evt list because of level change */
1925 if (parent->cmode != HTB_CAN_SEND) {
Eric Dumazetc9364632013-06-15 03:30:10 -07001926 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 parent->cmode = HTB_CAN_SEND;
1928 }
1929 parent->level = (parent->parent ? parent->parent->level
Stephen Hemminger87990462006-08-10 23:35:16 -07001930 : TC_HTB_MAXDEPTH) - 1;
Cong Wang11957be2018-09-07 13:29:14 -07001931 memset(&parent->inner, 0, sizeof(parent->inner));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001932 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001933
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934 /* leaf (we) needs elementary qdisc */
Cong Wang11957be2018-09-07 13:29:14 -07001935 cl->leaf.q = new_q ? new_q : &noop_qdisc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001936
Stephen Hemminger87990462006-08-10 23:35:16 -07001937 cl->parent = parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
1939 /* set class to be in HTB_CAN_SEND state */
Jiri Pirkob9a7afd2013-02-12 00:12:02 +00001940 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1941 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
Eric Dumazet5343a7f2013-06-04 07:11:48 +00001942 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
Eric Dumazetd2de8752014-08-22 18:32:09 -07001943 cl->t_c = ktime_get_ns();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944 cl->cmode = HTB_CAN_SEND;
1945
1946 /* attach to the hash list and parent's family */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001947 qdisc_class_hash_insert(&q->clhash, &cl->common);
Patrick McHardy42077592008-07-05 23:22:53 -07001948 if (parent)
1949 parent->children++;
Cong Wang11957be2018-09-07 13:29:14 -07001950 if (cl->leaf.q != &noop_qdisc)
1951 qdisc_hash_add(cl->leaf.q, true);
Patrick McHardyee39e102007-07-02 22:48:13 -07001952 } else {
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001953 if (tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001954 err = gen_replace_estimator(&cl->bstats, NULL,
1955 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001956 NULL,
1957 qdisc_root_sleeping_running(sch),
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001958 tca[TCA_RATE]);
1959 if (err)
1960 return err;
1961 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001962
1963 if (q->offload) {
1964 struct net_device *dev = qdisc_dev(sch);
1965
1966 offload_opt = (struct tc_htb_qopt_offload) {
1967 .command = TC_HTB_NODE_MODIFY,
1968 .classid = cl->common.classid,
1969 .rate = max_t(u64, hopt->rate.rate, rate64),
1970 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1971 .extack = extack,
1972 };
1973 err = htb_offload(dev, &offload_opt);
1974 if (err)
1975 /* Estimator was replaced, and rollback may fail
1976 * as well, so we don't try to recover it, and
 1977 * the estimator won't work properly with the
1978 * offload anyway, because bstats are updated
1979 * only when the stats are queried.
1980 */
1981 return err;
1982 }
1983
Stephen Hemminger87990462006-08-10 23:35:16 -07001984 sch_tree_lock(sch);
Patrick McHardyee39e102007-07-02 22:48:13 -07001985 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986
Yang Yingliang1598f7c2013-12-10 14:59:28 +08001987 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1988 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 /* there used to be a nasty bug here: we have to check that the node
Cong Wang11957be2018-09-07 13:29:14 -07001991 * is really a leaf before changing cl->leaf !
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001992 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 if (!cl->level) {
Yang Yingliang1598f7c2013-12-10 14:59:28 +08001994 u64 quantum = cl->rate.rate_bytes_ps;
1995
1996 do_div(quantum, q->rate2quantum);
1997 cl->quantum = min_t(u64, quantum, INT_MAX);
1998
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001999 if (!hopt->quantum && cl->quantum < 1000) {
Li RongQingda01ec42018-03-30 10:11:21 +08002000 warn = -1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002001 cl->quantum = 1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 }
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002003 if (!hopt->quantum && cl->quantum > 200000) {
Li RongQingda01ec42018-03-30 10:11:21 +08002004 warn = 1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002005 cl->quantum = 200000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002006 }
2007 if (hopt->quantum)
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002008 cl->quantum = hopt->quantum;
2009 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2010 cl->prio = TC_HTB_NUMPRIO - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 }
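/* Worked example of the clamping above, assuming tc's default r2q of 10:
 * rate 100mbit = 12500000 B/s yields quantum 1250000 > 200000, so it is
 * capped at 200000 and the "is big" warning suggests changing r2q; rate
 * 8kbit = 1000 B/s yields quantum 100 < 1000, so it is raised to 1000
 * with the "is small" warning. An explicit per-class quantum bypasses
 * both limits.
 */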
2012
Jiri Pirko324f5aa2013-02-12 00:11:59 +00002013 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
Vimalkumarf3ad8572013-09-10 17:36:37 -07002014 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
Vimalkumar56b765b2012-10-31 06:04:11 +00002015
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 sch_tree_unlock(sch);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03002017 qdisc_put(parent_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018
Li RongQingda01ec42018-03-30 10:11:21 +08002019 if (warn)
2020 pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
2021 cl->common.classid, (warn == -1 ? "small" : "big"));
2022
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002023 qdisc_class_hash_grow(sch, &q->clhash);
2024
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 *arg = (unsigned long)cl;
2026 return 0;
2027
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002028err_kill_estimator:
2029 gen_kill_estimator(&cl->rate_est);
2030err_block_put:
2031 tcf_block_put(cl->block);
2032 kfree(cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033failure:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034 return err;
2035}
2036
Alexander Aringcbaacc42017-12-20 12:35:16 -05002037static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2038 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039{
2040 struct htb_sched *q = qdisc_priv(sch);
2041 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002042
Jiri Pirko6529eab2017-05-17 11:07:55 +02002043 return cl ? cl->block : q->block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044}
2045
2046static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
Stephen Hemminger87990462006-08-10 23:35:16 -07002047 u32 classid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048{
Stephen Hemminger87990462006-08-10 23:35:16 -07002049 struct htb_class *cl = htb_find(classid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002050
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051 /*if (cl && !cl->level) return 0;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00002052 * The line above used to be there to prevent attaching filters to
 2053 * leaves. But at least the tc_index filter uses this just to get the
 2054 * class for other reasons, so we have to allow it.
 2055 * ----
 2056 * 19.6.2002 As Werner explained, it is ok - bind filter is just
 2057 * another way to "lock" the class - unlike "get", this lock can
 2058 * be broken by the class during destroy, IIUC.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 */
Stephen Hemminger87990462006-08-10 23:35:16 -07002060 if (cl)
2061 cl->filter_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 return (unsigned long)cl;
2063}
2064
2065static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2066{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002068
Stephen Hemminger87990462006-08-10 23:35:16 -07002069 if (cl)
2070 cl->filter_cnt--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071}
2072
2073static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2074{
2075 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002076 struct htb_class *cl;
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002077 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
2079 if (arg->stop)
2080 return;
2081
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002082 for (i = 0; i < q->clhash.hashsize; i++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08002083 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 if (arg->count < arg->skip) {
2085 arg->count++;
2086 continue;
2087 }
2088 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2089 arg->stop = 1;
2090 return;
2091 }
2092 arg->count++;
2093 }
2094 }
2095}
2096
Eric Dumazet20fea082007-11-14 01:44:41 -08002097static const struct Qdisc_class_ops htb_class_ops = {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002098 .select_queue = htb_select_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 .graft = htb_graft,
2100 .leaf = htb_leaf,
Patrick McHardy256d61b2006-11-29 17:37:05 -08002101 .qlen_notify = htb_qlen_notify,
WANG Cong143976c2017-08-24 16:51:29 -07002102 .find = htb_search,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 .change = htb_change_class,
2104 .delete = htb_delete,
2105 .walk = htb_walk,
Jiri Pirko6529eab2017-05-17 11:07:55 +02002106 .tcf_block = htb_tcf_block,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 .bind_tcf = htb_bind_filter,
2108 .unbind_tcf = htb_unbind_filter,
2109 .dump = htb_dump_class,
2110 .dump_stats = htb_dump_class_stats,
2111};
2112
Eric Dumazet20fea082007-11-14 01:44:41 -08002113static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114 .cl_ops = &htb_class_ops,
2115 .id = "htb",
2116 .priv_size = sizeof(struct htb_sched),
2117 .enqueue = htb_enqueue,
2118 .dequeue = htb_dequeue,
Jarek Poplawski77be1552008-10-31 00:47:01 -07002119 .peek = qdisc_peek_dequeued,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 .init = htb_init,
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002121 .attach = htb_attach,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 .reset = htb_reset,
2123 .destroy = htb_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 .dump = htb_dump,
2125 .owner = THIS_MODULE,
2126};
2127
2128static int __init htb_module_init(void)
2129{
Stephen Hemminger87990462006-08-10 23:35:16 -07002130 return register_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131}
Stephen Hemminger87990462006-08-10 23:35:16 -07002132static void __exit htb_module_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133{
Stephen Hemminger87990462006-08-10 23:35:16 -07002134 unregister_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135}
Stephen Hemminger87990462006-08-10 23:35:16 -07002136
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137module_init(htb_module_init)
2138module_exit(htb_module_exit)
2139MODULE_LICENSE("GPL");