// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
 Author: devik@cdi.cz
 ========================================================================
 HTB is like TBF with multiple classes. It is also similar to CBQ because
 it allows assigning a priority to each class in the hierarchy.
 In fact it is another implementation of Floyd's formal sharing.

 Levels:
 Each class is assigned a level. Leaves ALWAYS have level 0 and root
 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
 one less than their parent.
*/
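
/* Illustration (editor's sketch, not part of the original source): a small
 * hierarchy of this kind could be built from user space roughly like
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 600kbit ceil 1mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 400kbit ceil 1mbit
 *
 * Here 1:10 and 1:20 are leaves (level 0); each is guaranteed its own rate
 * and may borrow from 1:1 up to its ceil while the sibling is idle.
 */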

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
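
/* Illustrative usage (an editorial assumption about typical administration,
 * not from the original source): both knobs can be set at module load time,
 * e.g. "modprobe sch_htb htb_hysteresis=1", or changed at runtime through
 * /sys/module/sch_htb/parameters/htb_hysteresis thanks to the 0640 mode.
 */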

/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store the classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;
	int			filter_cnt;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_basic_packed bstats_bias;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int		overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go to */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * we finish and return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)
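
/* Editorial note (not in the original source): HTB_DIRECT is a sentinel,
 * never a real class pointer. For illustration, an application can
 * pre-select a class by setting skb->priority to a classid, e.g. via
 * setsockopt(SO_PRIORITY) with value 0x10010 to request class 1:10 of
 * qdisc 1:, as described in the comment inside htb_classify() below.
 */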

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode at time cl->pq_key (nanoseconds). Make sure that class
 * is not already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past the last key, *n is set to NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but there can rather be hysteresis of
 * 0 .. -cl->{c,}buffer range. It is meant to limit number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
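
/* Worked example (illustrative numbers, not from the original source):
 * suppose *diff brings cl->ctokens to -5000 and hysteresis is off, so
 * htb_lowater() == 0. The class is then HTB_CANT_SEND and *diff is set to
 * 5000, the time until the ceil bucket refills back to zero, which becomes
 * the delay passed to htb_add_to_wait_tree(). If instead ctokens stays
 * non-negative but cl->tokens + *diff is below htb_hiwater(), the class
 * has exhausted its own rate and may only borrow (HTB_MAY_BORROW).
 */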

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
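
/* Editorial note (not in the original source): in both helpers above the
 * token count is clamped to the range [1 - cl->mbuffer, cl->{c,}buffer],
 * so a long idle period can never accumulate more than one buffer worth
 * of burst and a persistent backlog can never drive the class more than
 * mbuffer into debt. As an illustrative figure, at rate 1mbit a 1500 byte
 * packet makes psched_l2t_ns() charge roughly 12 ms worth of tokens.
 */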

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: applied are events with cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from the id-tree where the class's id is >= id,
 * or NULL if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find the leaf that the current feed pointers point to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
				htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is an active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS]	= { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static void htb_set_lockdep_class_child(struct Qdisc *q)
{
	static struct lock_class_key child_key;

	lockdep_set_class(qdisc_lock(q), &child_key);
}

static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}

static int htb_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct net_device *dev = qdisc_dev(sch);
	struct tc_htb_qopt_offload offload_opt;
	struct htb_sched *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_HTB_MAX + 1];
	struct tc_htb_glob *gopt;
	unsigned int ntx;
	bool offload;
	int err;

	qdisc_watchdog_init(&q->watchdog, sch);
	INIT_WORK(&q->work, htb_work_func);

	if (!opt)
		return -EINVAL;

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
					  NULL);
	if (err < 0)
		return err;

	if (!tb[TCA_HTB_INIT])
		return -EINVAL;

	gopt = nla_data(tb[TCA_HTB_INIT]);
	if (gopt->version != HTB_VER >> 16)
		return -EINVAL;

	offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);

	if (offload) {
		if (sch->parent != TC_H_ROOT)
			return -EOPNOTSUPP;

		if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
			return -EOPNOTSUPP;

		q->num_direct_qdiscs = dev->real_num_tx_queues;
		q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
					   sizeof(*q->direct_qdiscs),
					   GFP_KERNEL);
		if (!q->direct_qdiscs)
			return -ENOMEM;
	}

	err = qdisc_class_hash_init(&q->clhash);
	if (err < 0)
		goto err_free_direct_qdiscs;

	qdisc_skb_head_init(&q->direct_queue);

	if (tb[TCA_HTB_DIRECT_QLEN])
		q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
	else
		q->direct_qlen = qdisc_dev(sch)->tx_queue_len;

	if ((q->rate2quantum = gopt->rate2quantum) < 1)
		q->rate2quantum = 1;
	q->defcls = gopt->defcls;

	if (!offload)
		return 0;

	for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
		struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
		struct Qdisc *qdisc;

		qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
					  TC_H_MAKE(sch->handle, 0), extack);
		if (!qdisc) {
			err = -ENOMEM;
			goto err_free_qdiscs;
		}

		htb_set_lockdep_class_child(qdisc);
		q->direct_qdiscs[ntx] = qdisc;
		qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
	}

	sch->flags |= TCQ_F_MQROOT;

	offload_opt = (struct tc_htb_qopt_offload) {
		.command = TC_HTB_CREATE,
		.parent_classid = TC_H_MAJ(sch->handle) >> 16,
		.classid = TC_H_MIN(q->defcls),
		.extack = extack,
	};
	err = htb_offload(dev, &offload_opt);
	if (err)
		goto err_free_qdiscs;

	/* Defer this assignment, so that htb_destroy skips offload-related
	 * parts (especially calling ndo_setup_tc) on errors.
1123 */
1124 q->offload = true;
1125
Linus Torvalds1da177e2005-04-16 15:20:36 -07001126 return 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001127
1128err_free_qdiscs:
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001129 for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
1130 ntx++)
1131 qdisc_put(q->direct_qdiscs[ntx]);
1132
1133 qdisc_class_hash_destroy(&q->clhash);
1134 /* Prevent use-after-free and double-free when htb_destroy gets called.
1135 */
1136 q->clhash.hash = NULL;
1137 q->clhash.hashsize = 0;
1138
1139err_free_direct_qdiscs:
1140 kfree(q->direct_qdiscs);
1141 q->direct_qdiscs = NULL;
1142 return err;
1143}
1144
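/* Attach helpers: in offload mode HTB acts like mq at the root - the
 * per-queue pfifo qdiscs allocated in htb_init() are grafted onto the first
 * num_direct_qdiscs TX queues and any remaining queues are left with no
 * qdisc. In software mode the HTB instance itself is grafted onto every TX
 * queue, taking one reference per queue.
 */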
1145static void htb_attach_offload(struct Qdisc *sch)
1146{
1147 struct net_device *dev = qdisc_dev(sch);
1148 struct htb_sched *q = qdisc_priv(sch);
1149 unsigned int ntx;
1150
1151 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1152 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1153
1154 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1155 qdisc_put(old);
1156 qdisc_hash_add(qdisc, false);
1157 }
1158 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1159 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1160 struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1161
1162 qdisc_put(old);
1163 }
1164
1165 kfree(q->direct_qdiscs);
1166 q->direct_qdiscs = NULL;
1167}
1168
1169static void htb_attach_software(struct Qdisc *sch)
1170{
1171 struct net_device *dev = qdisc_dev(sch);
1172 unsigned int ntx;
1173
1174	/* Mimic qdisc_graft behavior. */
1175 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1176 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1177 struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1178
1179 qdisc_refcount_inc(sch);
1180
1181 qdisc_put(old);
1182 }
1183}
1184
1185static void htb_attach(struct Qdisc *sch)
1186{
1187 struct htb_sched *q = qdisc_priv(sch);
1188
1189 if (q->offload)
1190 htb_attach_offload(sch);
1191 else
1192 htb_attach_software(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001193}
1194
1195static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1196{
1197 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001198 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 struct tc_htb_glob gopt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001201 if (q->offload)
1202 sch->flags |= TCQ_F_OFFLOADED;
1203 else
1204 sch->flags &= ~TCQ_F_OFFLOADED;
1205
Cong Wangb3624872019-05-04 11:43:42 -07001206 sch->qstats.overlimits = q->overlimits;
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001207	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1208	 * no change can happen to the qdisc parameters.
1209 */
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001210
1211 gopt.direct_pkts = q->direct_pkts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212 gopt.version = HTB_VER;
1213 gopt.rate2quantum = q->rate2quantum;
1214 gopt.defcls = q->defcls;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001215 gopt.debug = 0;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001216
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001217 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001218 if (nest == NULL)
1219 goto nla_put_failure;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001220 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1221 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
David S. Miller1b34ec42012-03-29 05:11:39 -04001222 goto nla_put_failure;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001223 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1224 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001225
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001226 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001227
Patrick McHardy1e904742008-01-22 22:11:17 -08001228nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001229 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 return -1;
1231}
1232
1233static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
Stephen Hemminger87990462006-08-10 23:35:16 -07001234 struct sk_buff *skb, struct tcmsg *tcm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235{
Stephen Hemminger87990462006-08-10 23:35:16 -07001236 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001237 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001238 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239 struct tc_htb_opt opt;
1240
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001241	/* It's safe not to acquire the qdisc lock. As we hold RTNL,
1242	 * no change can happen to the class parameters.
1243 */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001244 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1245 tcm->tcm_handle = cl->common.classid;
Cong Wang11957be2018-09-07 13:29:14 -07001246 if (!cl->level && cl->leaf.q)
1247 tcm->tcm_info = cl->leaf.q->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001249 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001250 if (nest == NULL)
1251 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252
Stephen Hemminger87990462006-08-10 23:35:16 -07001253 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001255 psched_ratecfg_getrate(&opt.rate, &cl->rate);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001256 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001257 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001258 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001259 opt.quantum = cl->quantum;
1260 opt.prio = cl->prio;
Stephen Hemminger87990462006-08-10 23:35:16 -07001261 opt.level = cl->level;
David S. Miller1b34ec42012-03-29 05:11:39 -04001262 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1263 goto nla_put_failure;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001264 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1265 goto nla_put_failure;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001266 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001267 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1268 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001269 goto nla_put_failure;
1270 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001271 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1272 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001273 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001274
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001275 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001276
Patrick McHardy1e904742008-01-22 22:11:17 -08001277nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001278 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001279 return -1;
1280}
1281
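/* With offload, packets bypass the software hierarchy, so an inner class
 * has no byte/packet counters of its own. Rebuild them by walking the class
 * hash and, for every descendant of cl, summing the saved bstats_bias plus,
 * for leaves, the counters of the attached qdisc.
 */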
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001282static void htb_offload_aggregate_stats(struct htb_sched *q,
1283 struct htb_class *cl)
1284{
1285 struct htb_class *c;
1286 unsigned int i;
1287
1288 memset(&cl->bstats, 0, sizeof(cl->bstats));
1289
1290 for (i = 0; i < q->clhash.hashsize; i++) {
1291 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1292 struct htb_class *p = c;
1293
1294 while (p && p->level < cl->level)
1295 p = p->parent;
1296
1297 if (p != cl)
1298 continue;
1299
1300 cl->bstats.bytes += c->bstats_bias.bytes;
1301 cl->bstats.packets += c->bstats_bias.packets;
1302 if (c->level == 0) {
1303 cl->bstats.bytes += c->leaf.q->bstats.bytes;
1304 cl->bstats.packets += c->leaf.q->bstats.packets;
1305 }
1306 }
1307 }
1308}
1309
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310static int
Stephen Hemminger87990462006-08-10 23:35:16 -07001311htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312{
Stephen Hemminger87990462006-08-10 23:35:16 -07001313 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001314 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001315 struct gnet_stats_queue qs = {
1316 .drops = cl->drops,
Eric Dumazet3c75f6e2017-09-18 12:36:22 -07001317 .overlimits = cl->overlimits,
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001318 };
John Fastabend64015852014-09-28 11:53:57 -07001319 __u32 qlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320
Paolo Abeni5dd431b2019-03-28 16:53:12 +01001321 if (!cl->level && cl->leaf.q)
1322 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1323
Konstantin Khlebnikov0564bf02016-07-16 17:08:56 +03001324 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1325 INT_MIN, INT_MAX);
1326 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1327 INT_MIN, INT_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001329 if (q->offload) {
1330 if (!cl->level) {
1331 if (cl->leaf.q)
1332 cl->bstats = cl->leaf.q->bstats;
1333 else
1334 memset(&cl->bstats, 0, sizeof(cl->bstats));
1335 cl->bstats.bytes += cl->bstats_bias.bytes;
1336 cl->bstats.packets += cl->bstats_bias.packets;
1337 } else {
1338 htb_offload_aggregate_stats(q, cl);
1339 }
1340 }
1341
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001342 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1343 d, NULL, &cl->bstats) < 0 ||
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001344 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001345 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 return -1;
1347
1348 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1349}
1350
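/* For offloaded HTB the driver owns the classid-to-queue mapping, so before
 * grafting a leaf qdisc we ask the device via TC_HTB_LEAF_QUERY_QUEUE which
 * TX queue backs the parent class.
 */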
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001351static struct netdev_queue *
1352htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1353{
1354 struct net_device *dev = qdisc_dev(sch);
1355 struct tc_htb_qopt_offload offload_opt;
Maxim Mikityanskiy93bde212021-03-11 16:42:05 +02001356 struct htb_sched *q = qdisc_priv(sch);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001357 int err;
1358
Maxim Mikityanskiy93bde212021-03-11 16:42:05 +02001359 if (!q->offload)
1360 return sch->dev_queue;
1361
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001362 offload_opt = (struct tc_htb_qopt_offload) {
1363 .command = TC_HTB_LEAF_QUERY_QUEUE,
1364 .classid = TC_H_MIN(tcm->tcm_parent),
1365 };
1366 err = htb_offload(dev, &offload_opt);
1367 if (err || offload_opt.qid >= dev->num_tx_queues)
1368 return NULL;
1369 return netdev_get_tx_queue(dev, offload_opt.qid);
1370}
1371
1372static struct Qdisc *
1373htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1374{
1375 struct net_device *dev = dev_queue->dev;
1376 struct Qdisc *old_q;
1377
1378 if (dev->flags & IFF_UP)
1379 dev_deactivate(dev);
1380 old_q = dev_graft_qdisc(dev_queue, new_q);
1381 if (new_q)
1382 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1383 if (dev->flags & IFF_UP)
1384 dev_activate(dev);
1385
1386 return old_q;
1387}
1388
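/* When the driver compacts its queue numbering after a leaf deletion, move
 * the qdisc attached to the old TX queue over to the new one; only a
 * builtin (noop) qdisc is expected to be displaced from the target queue.
 */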
1389static void htb_offload_move_qdisc(struct Qdisc *sch, u16 qid_old, u16 qid_new)
1390{
1391 struct netdev_queue *queue_old, *queue_new;
1392 struct net_device *dev = qdisc_dev(sch);
1393 struct Qdisc *qdisc;
1394
1395 queue_old = netdev_get_tx_queue(dev, qid_old);
1396 queue_new = netdev_get_tx_queue(dev, qid_new);
1397
1398 if (dev->flags & IFF_UP)
1399 dev_deactivate(dev);
1400 qdisc = dev_graft_qdisc(queue_old, NULL);
1401 qdisc->dev_queue = queue_new;
1402 qdisc = dev_graft_qdisc(queue_new, qdisc);
1403 if (dev->flags & IFF_UP)
1404 dev_activate(dev);
1405
1406 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1407}
1408
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
Alexander Aring653d6fd2017-12-20 12:35:17 -05001410 struct Qdisc **old, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001412 struct netdev_queue *dev_queue = sch->dev_queue;
Stephen Hemminger87990462006-08-10 23:35:16 -07001413 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001414 struct htb_sched *q = qdisc_priv(sch);
1415 struct Qdisc *old_q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001416
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001417 if (cl->level)
1418 return -EINVAL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001419
1420 if (q->offload) {
1421		dev_queue = cl->leaf.q->dev_queue;
1422		WARN_ON(new && new->dev_queue != dev_queue);
1423 }
1424
1425 if (!new) {
1426 new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1427 cl->common.classid, extack);
1428 if (!new)
1429 return -ENOBUFS;
1430 }
1431
1432 if (q->offload) {
1433 htb_set_lockdep_class_child(new);
1434 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1435 qdisc_refcount_inc(new);
1436 old_q = htb_graft_helper(dev_queue, new);
1437 }
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001438
Cong Wang11957be2018-09-07 13:29:14 -07001439 *old = qdisc_replace(sch, new, &cl->leaf.q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001440
1441 if (q->offload) {
1442 WARN_ON(old_q != *old);
1443 qdisc_put(old_q);
1444 }
1445
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001446 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447}
1448
Stephen Hemminger87990462006-08-10 23:35:16 -07001449static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450{
Stephen Hemminger87990462006-08-10 23:35:16 -07001451 struct htb_class *cl = (struct htb_class *)arg;
Cong Wang11957be2018-09-07 13:29:14 -07001452 return !cl->level ? cl->leaf.q : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453}
1454
Patrick McHardy256d61b2006-11-29 17:37:05 -08001455static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1456{
1457 struct htb_class *cl = (struct htb_class *)arg;
1458
Konstantin Khlebnikov95946652017-08-15 16:39:59 +03001459 htb_deactivate(qdisc_priv(sch), cl);
Patrick McHardy256d61b2006-11-29 17:37:05 -08001460}
1461
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001462static inline int htb_parent_last_child(struct htb_class *cl)
1463{
1464 if (!cl->parent)
1465 /* the root class */
1466 return 0;
Patrick McHardy42077592008-07-05 23:22:53 -07001467 if (cl->parent->children > 1)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001468 /* not the last child */
1469 return 0;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001470 return 1;
1471}
1472
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001473static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001474 struct Qdisc *new_q)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001475{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001476 struct htb_sched *q = qdisc_priv(sch);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001477 struct htb_class *parent = cl->parent;
1478
Cong Wang11957be2018-09-07 13:29:14 -07001479 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001480
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001481 if (parent->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001482 htb_safe_rb_erase(&parent->pq_node,
1483 &q->hlevel[parent->level].wait_pq);
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001484
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001485 parent->level = 0;
Cong Wang11957be2018-09-07 13:29:14 -07001486 memset(&parent->inner, 0, sizeof(parent->inner));
1487 parent->leaf.q = new_q ? new_q : &noop_qdisc;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001488 parent->tokens = parent->buffer;
1489 parent->ctokens = parent->cbuffer;
Eric Dumazetd2de8752014-08-22 18:32:09 -07001490 parent->t_c = ktime_get_ns();
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001491 parent->cmode = HTB_CAN_SEND;
1492}
1493
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001494static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1495 struct netdev_queue *dev_queue,
1496 struct Qdisc *new_q)
1497{
1498 struct Qdisc *old_q;
1499
1500 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1501 qdisc_refcount_inc(new_q);
1502 old_q = htb_graft_helper(dev_queue, new_q);
1503 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1504}
1505
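/* Three offload commands are used below: TC_HTB_LEAF_DEL removes an
 * ordinary leaf, after which the driver may compact its queues and report
 * the displaced one in moved_qid; TC_HTB_LEAF_DEL_LAST removes the last
 * child, turning the parent back into a leaf; TC_HTB_LEAF_DEL_LAST_FORCE
 * does the same during full qdisc destruction, where errors cannot be
 * rolled back.
 */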
1506static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1507 bool last_child, bool destroying,
1508 struct netlink_ext_ack *extack)
1509{
1510 struct tc_htb_qopt_offload offload_opt;
1511 struct Qdisc *q = cl->leaf.q;
1512 struct Qdisc *old = NULL;
1513 int err;
1514
1515 if (cl->level)
1516 return -EINVAL;
1517
1518 WARN_ON(!q);
1519 if (!destroying) {
1520 /* On destroy of HTB, two cases are possible:
1521 * 1. q is a normal qdisc, but q->dev_queue has noop qdisc.
1522 * 2. q is a noop qdisc (for nodes that were inner),
1523 * q->dev_queue is noop_netdev_queue.
1524 */
1525 old = htb_graft_helper(q->dev_queue, NULL);
1526 WARN_ON(!old);
1527 WARN_ON(old != q);
1528 }
1529
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001530 if (cl->parent) {
1531 cl->parent->bstats_bias.bytes += q->bstats.bytes;
1532 cl->parent->bstats_bias.packets += q->bstats.packets;
1533 }
1534
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001535 offload_opt = (struct tc_htb_qopt_offload) {
1536 .command = !last_child ? TC_HTB_LEAF_DEL :
1537 destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1538 TC_HTB_LEAF_DEL_LAST,
1539 .classid = cl->common.classid,
1540 .extack = extack,
1541 };
1542 err = htb_offload(qdisc_dev(sch), &offload_opt);
1543
1544 if (!err || destroying)
1545 qdisc_put(old);
1546 else
1547 htb_graft_helper(q->dev_queue, old);
1548
1549 if (last_child)
1550 return err;
1551
1552 if (!err && offload_opt.moved_qid != 0) {
1553 if (destroying)
1554 q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch),
1555 offload_opt.qid);
1556 else
1557 htb_offload_move_qdisc(sch, offload_opt.moved_qid,
1558 offload_opt.qid);
1559 }
1560
1561 return err;
1562}
1563
Stephen Hemminger87990462006-08-10 23:35:16 -07001564static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001566 if (!cl->level) {
Cong Wang11957be2018-09-07 13:29:14 -07001567 WARN_ON(!cl->leaf.q);
Vlad Buslov86bd4462018-09-24 19:22:50 +03001568 qdisc_put(cl->leaf.q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001569 }
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001570 gen_kill_estimator(&cl->rate_est);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001571 tcf_block_put(cl->block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001572 kfree(cl);
1573}
1574
Stephen Hemminger87990462006-08-10 23:35:16 -07001575static void htb_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001577 struct net_device *dev = qdisc_dev(sch);
1578 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 struct htb_sched *q = qdisc_priv(sch);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001580 struct hlist_node *next;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001581 bool nonempty, changed;
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001582 struct htb_class *cl;
1583 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001584
Jarek Poplawski12247362009-02-01 01:13:22 -08001585 cancel_work_sync(&q->work);
Patrick McHardyfb983d42007-03-16 01:22:39 -07001586 qdisc_watchdog_cancel(&q->watchdog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001587	/* This line used to be after the htb_destroy_class call below,
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001588	 * and surprisingly it worked in 2.4. But it must precede it,
1589	 * because filters need their target class alive to be able to call
1590	 * unbind_filter on it (without an Oops).
1591 */
Jiri Pirko6529eab2017-05-17 11:07:55 +02001592 tcf_block_put(q->block);
Stephen Hemminger87990462006-08-10 23:35:16 -07001593
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001594 for (i = 0; i < q->clhash.hashsize; i++) {
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001595 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Jiri Pirko6529eab2017-05-17 11:07:55 +02001596 tcf_block_put(cl->block);
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001597 cl->block = NULL;
1598 }
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001599 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001600
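	/* Destroy leaves first: with offload, an inner class can only be
	 * freed once all of its children are gone and the driver has turned
	 * it back into a leaf, so keep making passes over the hash until a
	 * full pass deletes nothing.
	 */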
1601 do {
1602 nonempty = false;
1603 changed = false;
1604 for (i = 0; i < q->clhash.hashsize; i++) {
1605 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1606 common.hnode) {
1607 bool last_child;
1608
1609 if (!q->offload) {
1610 htb_destroy_class(sch, cl);
1611 continue;
1612 }
1613
1614 nonempty = true;
1615
1616 if (cl->level)
1617 continue;
1618
1619 changed = true;
1620
1621 last_child = htb_parent_last_child(cl);
1622 htb_destroy_class_offload(sch, cl, last_child,
1623 true, NULL);
1624 qdisc_class_hash_remove(&q->clhash,
1625 &cl->common);
1626 if (cl->parent)
1627 cl->parent->children--;
1628 if (last_child)
1629 htb_parent_to_leaf(sch, cl, NULL);
1630 htb_destroy_class(sch, cl);
1631 }
1632 }
1633 } while (changed);
1634 WARN_ON(nonempty);
1635
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001636 qdisc_class_hash_destroy(&q->clhash);
Eric Dumazeta5a9f5342016-06-13 20:21:56 -07001637 __qdisc_reset_queue(&q->direct_queue);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001638
1639 if (!q->offload)
1640 return;
1641
1642 offload_opt = (struct tc_htb_qopt_offload) {
1643 .command = TC_HTB_DESTROY,
1644 };
1645 htb_offload(dev, &offload_opt);
1646
1647 if (!q->direct_qdiscs)
1648 return;
1649 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1650 qdisc_put(q->direct_qdiscs[i]);
1651 kfree(q->direct_qdiscs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652}
1653
Maxim Mikityanskiy4dd78a72021-01-19 14:08:12 +02001654static int htb_delete(struct Qdisc *sch, unsigned long arg,
1655 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656{
1657 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001658 struct htb_class *cl = (struct htb_class *)arg;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001659 struct Qdisc *new_q = NULL;
1660 int last_child = 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001661 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001662
Yang Yinglianga071d272013-12-23 17:38:59 +08001663	/* TODO: why not allow deleting a whole subtree? References? Does
1664	 * the tc subsystem guarantee that it holds no class refs by the
1665	 * time htb_destroy runs, so that we can remove children safely there?
1666 */
Patrick McHardy42077592008-07-05 23:22:53 -07001667 if (cl->children || cl->filter_cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001668 return -EBUSY;
Stephen Hemminger87990462006-08-10 23:35:16 -07001669
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001670 if (!cl->level && htb_parent_last_child(cl))
1671 last_child = 1;
1672
1673 if (q->offload) {
1674 err = htb_destroy_class_offload(sch, cl, last_child, false,
1675 extack);
1676 if (err)
1677 return err;
1678 }
1679
1680 if (last_child) {
1681 struct netdev_queue *dev_queue;
1682
1683 dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue;
1684 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001685 cl->parent->common.classid,
1686 NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001687 if (q->offload) {
Yunjian Wangae81feb2021-03-30 22:27:48 +08001688 if (new_q) {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001689 htb_set_lockdep_class_child(new_q);
Yunjian Wangae81feb2021-03-30 22:27:48 +08001690 htb_parent_to_leaf_offload(sch, dev_queue, new_q);
1691 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001692 }
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001693 }
1694
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695 sch_tree_lock(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001696
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001697 if (!cl->level)
1698 qdisc_purge_queue(cl->leaf.q);
Patrick McHardy814a175e2006-11-29 17:34:50 -08001699
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001700 /* delete from hash and active; remainder in destroy_class */
1701 qdisc_class_hash_remove(&q->clhash, &cl->common);
Jarek Poplawski26b284d2008-08-13 15:16:43 -07001702 if (cl->parent)
1703 cl->parent->children--;
Patrick McHardyc38c83c2007-03-27 14:04:24 -07001704
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 if (cl->prio_activity)
Stephen Hemminger87990462006-08-10 23:35:16 -07001706 htb_deactivate(q, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001708 if (cl->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001709 htb_safe_rb_erase(&cl->pq_node,
1710 &q->hlevel[cl->level].wait_pq);
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001711
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001712 if (last_child)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001713 htb_parent_to_leaf(sch, cl, new_q);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001714
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 sch_tree_unlock(sch);
WANG Cong143976c2017-08-24 16:51:29 -07001716
1717 htb_destroy_class(sch, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 return 0;
1719}
1720
Stephen Hemminger87990462006-08-10 23:35:16 -07001721static int htb_change_class(struct Qdisc *sch, u32 classid,
Patrick McHardy1e904742008-01-22 22:11:17 -08001722 u32 parentid, struct nlattr **tca,
Alexander Aring793d81d2017-12-20 12:35:15 -05001723 unsigned long *arg, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724{
1725 int err = -EINVAL;
1726 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001727 struct htb_class *cl = (struct htb_class *)*arg, *parent;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001728 struct tc_htb_qopt_offload offload_opt;
Patrick McHardy1e904742008-01-22 22:11:17 -08001729 struct nlattr *opt = tca[TCA_OPTIONS];
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001730 struct nlattr *tb[TCA_HTB_MAX + 1];
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001731 struct Qdisc *parent_qdisc = NULL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001732 struct netdev_queue *dev_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 struct tc_htb_opt *hopt;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001734 u64 rate64, ceil64;
Li RongQingda01ec42018-03-30 10:11:21 +08001735 int warn = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736
1737 /* extract all subattrs from opt attr */
Patrick McHardycee63722008-01-23 20:33:32 -08001738 if (!opt)
1739 goto failure;
1740
Johannes Berg8cb08172019-04-26 14:07:28 +02001741 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1742 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001743 if (err < 0)
1744 goto failure;
1745
1746 err = -EINVAL;
Patrick McHardy27a34212008-01-23 20:35:39 -08001747 if (tb[TCA_HTB_PARMS] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749
Stephen Hemminger87990462006-08-10 23:35:16 -07001750 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001751
Patrick McHardy1e904742008-01-22 22:11:17 -08001752 hopt = nla_data(tb[TCA_HTB_PARMS]);
Eric Dumazet196d97f2012-11-05 16:40:49 +00001753 if (!hopt->rate.rate || !hopt->ceil.rate)
Stephen Hemminger87990462006-08-10 23:35:16 -07001754 goto failure;
1755
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001756 /* Keeping backward compatible with rate_table based iproute2 tc */
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001757 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001758 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1759 NULL));
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001760
1761 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001762 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1763 NULL));
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001764
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001765 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1766 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1767
Stephen Hemminger87990462006-08-10 23:35:16 -07001768 if (!cl) { /* new class */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001769 struct net_device *dev = qdisc_dev(sch);
1770 struct Qdisc *new_q, *old_q;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001771 int prio;
Patrick McHardyee39e102007-07-02 22:48:13 -07001772 struct {
Patrick McHardy1e904742008-01-22 22:11:17 -08001773 struct nlattr nla;
Patrick McHardyee39e102007-07-02 22:48:13 -07001774 struct gnet_estimator opt;
1775 } est = {
Patrick McHardy1e904742008-01-22 22:11:17 -08001776 .nla = {
1777 .nla_len = nla_attr_size(sizeof(est.opt)),
1778 .nla_type = TCA_RATE,
Patrick McHardyee39e102007-07-02 22:48:13 -07001779 },
1780 .opt = {
1781 /* 4s interval, 16s averaging constant */
1782 .interval = 2,
1783 .ewma_log = 2,
1784 },
1785 };
Stephen Hemminger3696f622006-08-10 23:36:01 -07001786
Linus Torvalds1da177e2005-04-16 15:20:36 -07001787 /* check for valid classid */
Joe Perchesf64f9e72009-11-29 16:55:45 -08001788 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1789 htb_find(classid, sch))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 goto failure;
1791
1792 /* check maximal depth */
1793 if (parent && parent->parent && parent->parent->level < 2) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001794 pr_err("htb: tree is too deep\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 goto failure;
1796 }
1797 err = -ENOBUFS;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001798 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1799 if (!cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800 goto failure;
Stephen Hemminger87990462006-08-10 23:35:16 -07001801
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001802 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001803 if (err) {
1804 kfree(cl);
1805 goto failure;
1806 }
Eric Dumazet64153ce2013-06-06 14:53:16 -07001807 if (htb_rate_est || tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001808 err = gen_new_estimator(&cl->bstats, NULL,
1809 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001810 NULL,
1811 qdisc_root_sleeping_running(sch),
Eric Dumazet64153ce2013-06-06 14:53:16 -07001812 tca[TCA_RATE] ? : &est.nla);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001813 if (err)
1814 goto err_block_put;
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001815 }
1816
Patrick McHardy42077592008-07-05 23:22:53 -07001817 cl->children = 0;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001818 RB_CLEAR_NODE(&cl->pq_node);
1819
1820 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1821 RB_CLEAR_NODE(&cl->node[prio]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001822
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001823 cl->common.classid = classid;
1824
1825	 /* Make sure nothing interrupts us between the two
1826	  * ndo_setup_tc calls.
1827 */
1828 ASSERT_RTNL();
1829
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830	 /* create the leaf qdisc early, because it uses kmalloc(GFP_KERNEL),
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001831	  * which can't be used inside sch_tree_lock
1832 * -- thanks to Karlis Peisenieks
1833 */
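		/* Pick the TX queue backing the new leaf:
		 * - software mode: every class shares sch->dev_queue;
		 * - offload, parent is the root or already inner: have the
		 *   driver allocate a queue (TC_HTB_LEAF_ALLOC_QUEUE);
		 * - offload, parent is currently a leaf: convert it to an
		 *   inner node (TC_HTB_LEAF_TO_INNER) and reuse its queue
		 *   for this first child.
		 */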
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001834 if (!q->offload) {
1835 dev_queue = sch->dev_queue;
1836 } else if (!(parent && !parent->level)) {
1837 /* Assign a dev_queue to this classid. */
1838 offload_opt = (struct tc_htb_qopt_offload) {
1839 .command = TC_HTB_LEAF_ALLOC_QUEUE,
1840 .classid = cl->common.classid,
1841 .parent_classid = parent ?
1842 TC_H_MIN(parent->common.classid) :
1843 TC_HTB_CLASSID_ROOT,
1844 .rate = max_t(u64, hopt->rate.rate, rate64),
1845 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1846 .extack = extack,
1847 };
1848 err = htb_offload(dev, &offload_opt);
1849 if (err) {
1850 pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
1851 err);
1852 goto err_kill_estimator;
1853 }
1854 dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1855 } else { /* First child. */
1856 dev_queue = parent->leaf.q->dev_queue;
1857 old_q = htb_graft_helper(dev_queue, NULL);
1858 WARN_ON(old_q != parent->leaf.q);
1859 offload_opt = (struct tc_htb_qopt_offload) {
1860 .command = TC_HTB_LEAF_TO_INNER,
1861 .classid = cl->common.classid,
1862 .parent_classid =
1863 TC_H_MIN(parent->common.classid),
1864 .rate = max_t(u64, hopt->rate.rate, rate64),
1865 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1866 .extack = extack,
1867 };
1868 err = htb_offload(dev, &offload_opt);
1869 if (err) {
1870 pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
1871 err);
1872 htb_graft_helper(dev_queue, old_q);
1873 goto err_kill_estimator;
1874 }
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001875 parent->bstats_bias.bytes += old_q->bstats.bytes;
1876 parent->bstats_bias.packets += old_q->bstats.packets;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001877 qdisc_put(old_q);
1878 }
1879 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001880 classid, NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001881 if (q->offload) {
1882 if (new_q) {
1883 htb_set_lockdep_class_child(new_q);
1884 /* One ref for cl->leaf.q, the other for
1885 * dev_queue->qdisc.
1886 */
1887 qdisc_refcount_inc(new_q);
1888 }
1889 old_q = htb_graft_helper(dev_queue, new_q);
1890 /* No qdisc_put needed. */
1891 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1892 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 sch_tree_lock(sch);
1894 if (parent && !parent->level) {
1895 /* turn parent into inner node */
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001896 qdisc_purge_queue(parent->leaf.q);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001897 parent_qdisc = parent->leaf.q;
Stephen Hemminger87990462006-08-10 23:35:16 -07001898 if (parent->prio_activity)
1899 htb_deactivate(q, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900
1901 /* remove from evt list because of level change */
1902 if (parent->cmode != HTB_CAN_SEND) {
Eric Dumazetc9364632013-06-15 03:30:10 -07001903 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 parent->cmode = HTB_CAN_SEND;
1905 }
1906 parent->level = (parent->parent ? parent->parent->level
Stephen Hemminger87990462006-08-10 23:35:16 -07001907 : TC_HTB_MAXDEPTH) - 1;
Cong Wang11957be2018-09-07 13:29:14 -07001908 memset(&parent->inner, 0, sizeof(parent->inner));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001910
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911	 /* leaf (we) needs an elementary qdisc */
Cong Wang11957be2018-09-07 13:29:14 -07001912 cl->leaf.q = new_q ? new_q : &noop_qdisc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001913
Stephen Hemminger87990462006-08-10 23:35:16 -07001914 cl->parent = parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915
1916 /* set class to be in HTB_CAN_SEND state */
Jiri Pirkob9a7afd2013-02-12 00:12:02 +00001917 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1918 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
Eric Dumazet5343a7f2013-06-04 07:11:48 +00001919 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
Eric Dumazetd2de8752014-08-22 18:32:09 -07001920 cl->t_c = ktime_get_ns();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 cl->cmode = HTB_CAN_SEND;
1922
1923 /* attach to the hash list and parent's family */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001924 qdisc_class_hash_insert(&q->clhash, &cl->common);
Patrick McHardy42077592008-07-05 23:22:53 -07001925 if (parent)
1926 parent->children++;
Cong Wang11957be2018-09-07 13:29:14 -07001927 if (cl->leaf.q != &noop_qdisc)
1928 qdisc_hash_add(cl->leaf.q, true);
Patrick McHardyee39e102007-07-02 22:48:13 -07001929 } else {
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001930 if (tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001931 err = gen_replace_estimator(&cl->bstats, NULL,
1932 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001933 NULL,
1934 qdisc_root_sleeping_running(sch),
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001935 tca[TCA_RATE]);
1936 if (err)
1937 return err;
1938 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001939
1940 if (q->offload) {
1941 struct net_device *dev = qdisc_dev(sch);
1942
1943 offload_opt = (struct tc_htb_qopt_offload) {
1944 .command = TC_HTB_NODE_MODIFY,
1945 .classid = cl->common.classid,
1946 .rate = max_t(u64, hopt->rate.rate, rate64),
1947 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1948 .extack = extack,
1949 };
1950 err = htb_offload(dev, &offload_opt);
1951 if (err)
1952 /* Estimator was replaced, and rollback may fail
1953 * as well, so we don't try to recover it, and
1954			 * the estimator won't work properly with the
1955 * offload anyway, because bstats are updated
1956 * only when the stats are queried.
1957 */
1958 return err;
1959 }
1960
Stephen Hemminger87990462006-08-10 23:35:16 -07001961 sch_tree_lock(sch);
Patrick McHardyee39e102007-07-02 22:48:13 -07001962 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963
Yang Yingliang1598f7c2013-12-10 14:59:28 +08001964 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1965 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1966
Linus Torvalds1da177e2005-04-16 15:20:36 -07001967	/* There used to be a nasty bug here: we have to check that the node
Cong Wang11957be2018-09-07 13:29:14 -07001968	 * is really a leaf before changing cl->leaf!
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001969 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 if (!cl->level) {
Yang Yingliang1598f7c2013-12-10 14:59:28 +08001971 u64 quantum = cl->rate.rate_bytes_ps;
1972
1973 do_div(quantum, q->rate2quantum);
1974 cl->quantum = min_t(u64, quantum, INT_MAX);
1975
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001976 if (!hopt->quantum && cl->quantum < 1000) {
Li RongQingda01ec42018-03-30 10:11:21 +08001977 warn = -1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001978 cl->quantum = 1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979 }
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001980 if (!hopt->quantum && cl->quantum > 200000) {
Li RongQingda01ec42018-03-30 10:11:21 +08001981 warn = 1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001982 cl->quantum = 200000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001983 }
1984 if (hopt->quantum)
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001985 cl->quantum = hopt->quantum;
1986 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1987 cl->prio = TC_HTB_NUMPRIO - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 }
1989
Jiri Pirko324f5aa2013-02-12 00:11:59 +00001990 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
Vimalkumarf3ad8572013-09-10 17:36:37 -07001991 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
Vimalkumar56b765b2012-10-31 06:04:11 +00001992
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 sch_tree_unlock(sch);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001994 qdisc_put(parent_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995
Li RongQingda01ec42018-03-30 10:11:21 +08001996 if (warn)
1997 pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
1998 cl->common.classid, (warn == -1 ? "small" : "big"));
1999
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002000 qdisc_class_hash_grow(sch, &q->clhash);
2001
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 *arg = (unsigned long)cl;
2003 return 0;
2004
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002005err_kill_estimator:
2006 gen_kill_estimator(&cl->rate_est);
2007err_block_put:
2008 tcf_block_put(cl->block);
2009 kfree(cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010failure:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011 return err;
2012}
2013
Alexander Aringcbaacc42017-12-20 12:35:16 -05002014static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2015 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016{
2017 struct htb_sched *q = qdisc_priv(sch);
2018 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002019
Jiri Pirko6529eab2017-05-17 11:07:55 +02002020 return cl ? cl->block : q->block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021}
2022
2023static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
Stephen Hemminger87990462006-08-10 23:35:16 -07002024 u32 classid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025{
Stephen Hemminger87990462006-08-10 23:35:16 -07002026 struct htb_class *cl = htb_find(classid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028 /*if (cl && !cl->level) return 0;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00002029 * The line above used to be there to prevent attaching filters to
2030	 * leaves. But at least the tc_index filter uses this just to get the
2031	 * class for other reasons, so we have to allow for it.
2032	 * ----
2033	 * 19.6.2002 As Werner explained, it is OK - bind_filter is just
2034	 * another way to "lock" the class - unlike "get", this lock can
2035	 * be broken by the class during destroy, IIUC.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002036 */
Stephen Hemminger87990462006-08-10 23:35:16 -07002037 if (cl)
2038 cl->filter_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039 return (unsigned long)cl;
2040}
2041
2042static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2043{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002045
Stephen Hemminger87990462006-08-10 23:35:16 -07002046 if (cl)
2047 cl->filter_cnt--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048}
2049
2050static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2051{
2052 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002053 struct htb_class *cl;
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002054 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055
2056 if (arg->stop)
2057 return;
2058
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002059 for (i = 0; i < q->clhash.hashsize; i++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08002060 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 if (arg->count < arg->skip) {
2062 arg->count++;
2063 continue;
2064 }
2065 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2066 arg->stop = 1;
2067 return;
2068 }
2069 arg->count++;
2070 }
2071 }
2072}
2073
Eric Dumazet20fea082007-11-14 01:44:41 -08002074static const struct Qdisc_class_ops htb_class_ops = {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002075 .select_queue = htb_select_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 .graft = htb_graft,
2077 .leaf = htb_leaf,
Patrick McHardy256d61b2006-11-29 17:37:05 -08002078 .qlen_notify = htb_qlen_notify,
WANG Cong143976c2017-08-24 16:51:29 -07002079 .find = htb_search,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 .change = htb_change_class,
2081 .delete = htb_delete,
2082 .walk = htb_walk,
Jiri Pirko6529eab2017-05-17 11:07:55 +02002083 .tcf_block = htb_tcf_block,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084 .bind_tcf = htb_bind_filter,
2085 .unbind_tcf = htb_unbind_filter,
2086 .dump = htb_dump_class,
2087 .dump_stats = htb_dump_class_stats,
2088};
2089
Eric Dumazet20fea082007-11-14 01:44:41 -08002090static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091 .cl_ops = &htb_class_ops,
2092 .id = "htb",
2093 .priv_size = sizeof(struct htb_sched),
2094 .enqueue = htb_enqueue,
2095 .dequeue = htb_dequeue,
Jarek Poplawski77be1552008-10-31 00:47:01 -07002096 .peek = qdisc_peek_dequeued,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097 .init = htb_init,
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002098 .attach = htb_attach,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 .reset = htb_reset,
2100 .destroy = htb_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 .dump = htb_dump,
2102 .owner = THIS_MODULE,
2103};
2104
2105static int __init htb_module_init(void)
2106{
Stephen Hemminger87990462006-08-10 23:35:16 -07002107 return register_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108}
Stephen Hemminger87990462006-08-10 23:35:16 -07002109static void __exit htb_module_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110{
Stephen Hemminger87990462006-08-10 23:35:16 -07002111 unregister_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112}
Stephen Hemminger87990462006-08-10 23:35:16 -07002113
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114module_init(htb_module_init)
2115module_exit(htb_module_exit)
2116MODULE_LICENSE("GPL");