// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
    Author: devik@cdi.cz
    ========================================================================
    HTB is like TBF with multiple classes. It is also similar to CBQ because
    it allows assigning a priority to each class in the hierarchy.
    In fact it is another implementation of Floyd's formal sharing.

    Levels:
    Each class is assigned a level. Leaves ALWAYS have level 0 and root
    classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
    one less than their parent.
*/
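/* Illustrative example (not part of the original source), assuming
 * TC_HTB_MAXDEPTH == 8:
 *
 *	1:1	root class		-> level 7
 *	1:10	inner child of 1:1	-> level 6
 *	1:100	leaf under 1:10		-> level 0
 *	1:20	leaf under 1:1		-> level 0
 *
 * i.e. interior levels count down from the root while every leaf,
 * however deep, is pinned at level 0.
 */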

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");

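/* Illustrative usage (not part of the original source): both knobs are
 * ordinary module parameters, so assuming the module is built as sch_htb
 * they can be given at load time or, since the perm bits are 0640, changed
 * by root at runtime through sysfs:
 *
 *	modprobe sch_htb htb_hysteresis=1
 *	echo 1 > /sys/module/sch_htb/parameters/htb_hysteresis
 */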
/* used internally to keep status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root	row;
		struct rb_root	feed;
	};
	struct rb_node	*ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store the classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32		last_ptr_id;
};

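/* Illustrative example (not part of the original source): if the DRR scan
 * pointer was parked on class 1:30 when that class left the feed, ptr is
 * NULLed but last_ptr_id keeps 1:30's classid; the next scan resumes at the
 * first remaining class whose classid is >= that value (see
 * htb_id_find_next_upper() below).
 */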
/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg	rate;
	struct psched_ratecfg	ceil;
	s64			buffer, cbuffer;/* token bucket depth/rate */
	s64			mbuffer;	/* max wait time */
	u32			prio;		/* these two are used only by leaves... */
	int			quantum;	/* but stored for parent-to-leaf return */

	struct tcf_proto __rcu	*filter_list;	/* class attached filters */
	struct tcf_block	*block;
	int			filter_cnt;

	int			level;		/* our level (see above) */
	unsigned int		children;
	struct htb_class	*parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Often written fields
	 */
	struct gnet_stats_basic_sync bstats;
	struct gnet_stats_basic_sync bstats_bias;
	struct tc_htb_xstats	xstats;	/* our special stats */

	/* token bucket parameters */
	s64			tokens, ctokens;/* current number of tokens */
	s64			t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int		deficit[TC_HTB_MAXDEPTH];
			struct Qdisc	*q;
			struct netdev_queue *offload_queue;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64			pq_key;

	int			prio_activity;	/* for which prios are we active */
	enum htb_cmode		cmode;		/* current mode of the class */
	struct rb_node		pq_node;	/* node for event queue */
	struct rb_node		node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int		overlimits;
};

struct htb_level {
	struct rb_root	wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int			defcls;		/* class where unclassified flows go */
	int			rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu	*filter_list;
	struct tcf_block	*block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int		warned;	/* only one warning */
	int			direct_qlen;
	struct work_struct	work;

	/* non-shaped skbs; let them go directly thru */
	struct qdisc_skb_head	direct_queue;
	u32			direct_pkts;
	u32			overlimits;

	struct qdisc_watchdog	watchdog;

	s64			now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64			near_ev_cache[TC_HTB_MAXDEPTH];

	int			row_mask[TC_HTB_MAXDEPTH];

	struct htb_level	hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc		**direct_qdiscs;
	unsigned int		num_direct_qdiscs;

	bool			offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}
/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If still unsuccessful
 * then finish and return direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

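/* Illustrative example (not part of the original source): with the root
 * qdisc at handle 1:, a socket that sets SO_PRIORITY to 0x10010 (1:10)
 * bypasses all filters and is enqueued straight to class 1:10, while
 * priority 0x10000 (1:0, i.e. sch->handle) selects the direct queue.
 */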
static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow selecting a class by setting skb->priority to a valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, NULL, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default .. this is safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 * @root: the root of the tree
 * @cl: the class to add
 * @prio: the given prio in class
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 * @q: the priority event queue
 * @cl: the class to add
 * @delay: delay in nanoseconds
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key nanoseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 * @n: the current node in binary tree
 *
 * When we are past last key we return NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 * @q: the priority event queue
 * @cl: the class to add
 * @mask: the given priorities in class in bitmap
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}


/**
 * htb_remove_class_from_row - removes class from its row
 * @q: the priority event queue
 * @cl: the class to remove
 * @mask: the given priorities in class in bitmap
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 * @q: the priority event queue
 * @cl: the class to activate
 *
 * The class is connected to ancestors and/or appropriate rows
 * for priorities it is participating on. cl->cmode must be new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 * @q: the priority event queue
 * @cl: the class to deactivate
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing a child which is pointed to
				 * from parent feed - forget the pointer but
				 * remember classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;

	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}
static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}


/**
 * htb_class_mode - computes and returns current class mode
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * It is also worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0 but rather there is hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}

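/* Worked example (illustrative, not part of the original source), with
 * hysteresis off so htb_lowater() == htb_hiwater() == 0: if
 * cl->ctokens + *diff == -500 the ceil bucket is in deficit, the mode is
 * HTB_CANT_SEND and *diff becomes 500 (the shortfall, later used as the
 * event queue delay).  If the ceil bucket is fine but
 * cl->tokens + *diff == -200, the class may only borrow (HTB_MAY_BORROW,
 * *diff = 200).  Only when both buckets are non-negative is the mode
 * HTB_CAN_SEND.
 */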
/**
 * htb_change_class_mode - changes class's mode
 * @q: the priority event queue
 * @cl: the target class
 * @diff: diff time in nanoseconds
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 * It also adds leaf into droplist.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 * @q: the priority event queue
 * @cl: the target class
 *
 * Make sure that leaf is active. In other words, it can't be called
 * with a non-active leaf. It also removes class from the drop list.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}

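/* Worked example (illustrative, not part of the original source): at
 * rate 1 Mbit/s, psched_l2t_ns() charges about 8 ms (8,000,000 ns) for a
 * 1000-byte packet.  If 12 ms passed since the last checkpoint and the
 * bucket was empty, toks = 12 ms + 0 - 8 ms = 4 ms of credit, clamped
 * from above by cl->buffer and from below by 1 - cl->mbuffer.
 */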
/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 * @q: the priority event queue
 * @cl: the class to start iterating from
 * @level: the minimum level to account
 * @skb: the socket buffer
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue here.
 * In such case we remove class from event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 * @q: the priority event queue
 * @level: which wait_pq in 'q->hlevel'
 * @start: start jiffies
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: Applied are events whose cl->pq_key <= q->now.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;
	while (n) {
		struct htb_class *cl =
		    rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 * @hprio: the current htb_prio structure to search
 * @prio: which prio in class
 *
 * Find leaf where current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr:
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT]	= { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB]	= { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

Jarek Poplawski12247362009-02-01 01:13:22 -08001028static void htb_work_func(struct work_struct *work)
1029{
1030 struct htb_sched *q = container_of(work, struct htb_sched, work);
1031 struct Qdisc *sch = q->watchdog.qdisc;
1032
Florian Westphal0ee13622016-06-14 06:16:27 +02001033 rcu_read_lock();
Jarek Poplawski12247362009-02-01 01:13:22 -08001034 __netif_schedule(qdisc_root(sch));
Florian Westphal0ee13622016-06-14 06:16:27 +02001035 rcu_read_unlock();
Jarek Poplawski12247362009-02-01 01:13:22 -08001036}
1037
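/* All offload child qdiscs share one static lockdep key, putting their
 * locks into a class of their own, separate from the root qdisc's lock.
 */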
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001038static void htb_set_lockdep_class_child(struct Qdisc *q)
1039{
1040 static struct lock_class_key child_key;
1041
1042 lockdep_set_class(qdisc_lock(q), &child_key);
1043}
1044
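/* Thin wrapper for the HTB offload hook; callers have already checked
 * that the device implements ndo_setup_tc.
 */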
1045static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
1046{
1047 return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
1048}
1049
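/* Qdisc init: parse TCA_HTB_INIT and friends, set up the class hash and
 * the direct queue, and in offload mode additionally allocate one pfifo
 * per TX queue and issue TC_HTB_CREATE to the driver.
 */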
Alexander Aringe63d7df2017-12-20 12:35:13 -05001050static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1051 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001053 struct net_device *dev = qdisc_dev(sch);
1054 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001056 struct nlattr *tb[TCA_HTB_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 struct tc_htb_glob *gopt;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001058 unsigned int ntx;
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001059 bool offload;
Patrick McHardycee63722008-01-23 20:33:32 -08001060 int err;
Patrick McHardycee63722008-01-23 20:33:32 -08001061
Nikolay Aleksandrov88c2ace2017-08-30 12:48:57 +03001062 qdisc_watchdog_init(&q->watchdog, sch);
1063 INIT_WORK(&q->work, htb_work_func);
1064
Patrick McHardycee63722008-01-23 20:33:32 -08001065 if (!opt)
1066 return -EINVAL;
1067
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001068 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001069 if (err)
1070 return err;
1071
Johannes Berg8cb08172019-04-26 14:07:28 +02001072 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1073 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001074 if (err < 0)
1075 return err;
1076
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001077 if (!tb[TCA_HTB_INIT])
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078 return -EINVAL;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001079
Patrick McHardy1e904742008-01-22 22:11:17 -08001080 gopt = nla_data(tb[TCA_HTB_INIT]);
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001081 if (gopt->version != HTB_VER >> 16)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001083
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001084 offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001085
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001086 if (offload) {
Maxim Mikityanskiy648a9912021-10-28 15:24:36 +03001087 if (sch->parent != TC_H_ROOT) {
1088 NL_SET_ERR_MSG(extack, "HTB must be the root qdisc to use offload");
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001089 return -EOPNOTSUPP;
Maxim Mikityanskiy648a9912021-10-28 15:24:36 +03001090 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001091
Maxim Mikityanskiy648a9912021-10-28 15:24:36 +03001092 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc) {
1093 NL_SET_ERR_MSG(extack, "hw-tc-offload ethtool feature flag must be on");
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001094 return -EOPNOTSUPP;
Maxim Mikityanskiy648a9912021-10-28 15:24:36 +03001095 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001096
1097 q->num_direct_qdiscs = dev->real_num_tx_queues;
1098 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
1099 sizeof(*q->direct_qdiscs),
1100 GFP_KERNEL);
1101 if (!q->direct_qdiscs)
1102 return -ENOMEM;
1103 }
1104
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001105 err = qdisc_class_hash_init(&q->clhash);
1106 if (err < 0)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001107 goto err_free_direct_qdiscs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001108
Florian Westphal48da34b2016-09-18 00:57:34 +02001109 qdisc_skb_head_init(&q->direct_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001111 if (tb[TCA_HTB_DIRECT_QLEN])
1112 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
Phil Sutter348e3432015-08-18 10:30:49 +02001113 else
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001114 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
Phil Sutter348e3432015-08-18 10:30:49 +02001115
Linus Torvalds1da177e2005-04-16 15:20:36 -07001116 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1117 q->rate2quantum = 1;
1118 q->defcls = gopt->defcls;
1119
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001120 if (!offload)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001121 return 0;
1122
1123 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1124 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1125 struct Qdisc *qdisc;
1126
1127 qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1128 TC_H_MAKE(sch->handle, 0), extack);
1129 if (!qdisc) {
1130 err = -ENOMEM;
1131 goto err_free_qdiscs;
1132 }
1133
1134 htb_set_lockdep_class_child(qdisc);
1135 q->direct_qdiscs[ntx] = qdisc;
1136 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1137 }
1138
1139 sch->flags |= TCQ_F_MQROOT;
1140
1141 offload_opt = (struct tc_htb_qopt_offload) {
1142 .command = TC_HTB_CREATE,
1143 .parent_classid = TC_H_MAJ(sch->handle) >> 16,
1144 .classid = TC_H_MIN(q->defcls),
1145 .extack = extack,
1146 };
1147 err = htb_offload(dev, &offload_opt);
1148 if (err)
1149 goto err_free_qdiscs;
1150
Maxim Mikityanskiyfb3a3e32021-03-11 16:42:06 +02001151 /* Defer this assignment, so that htb_destroy skips offload-related
1152 * parts (especially calling ndo_setup_tc) on errors.
1153 */
1154 q->offload = true;
1155
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156 return 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001157
1158err_free_qdiscs:
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001159 for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
1160 ntx++)
1161 qdisc_put(q->direct_qdiscs[ntx]);
1162
1163 qdisc_class_hash_destroy(&q->clhash);
1164 /* Prevent use-after-free and double-free when htb_destroy gets called.
1165 */
1166 q->clhash.hash = NULL;
1167 q->clhash.hashsize = 0;
1168
1169err_free_direct_qdiscs:
1170 kfree(q->direct_qdiscs);
1171 q->direct_qdiscs = NULL;
1172 return err;
1173}
1174
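/* Offload mode: graft the per-queue pfifos created in htb_init() onto
 * their TX queues and drop any qdiscs left on the remaining queues.
 */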
1175static void htb_attach_offload(struct Qdisc *sch)
1176{
1177 struct net_device *dev = qdisc_dev(sch);
1178 struct htb_sched *q = qdisc_priv(sch);
1179 unsigned int ntx;
1180
1181 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1182 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1183
1184 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1185 qdisc_put(old);
1186 qdisc_hash_add(qdisc, false);
1187 }
1188 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1189 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1190 struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1191
1192 qdisc_put(old);
1193 }
1194
1195 kfree(q->direct_qdiscs);
1196 q->direct_qdiscs = NULL;
1197}
1198
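/* Software mode: attach the HTB qdisc itself to every TX queue, taking
 * one reference per queue.
 */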
1199static void htb_attach_software(struct Qdisc *sch)
1200{
1201 struct net_device *dev = qdisc_dev(sch);
1202 unsigned int ntx;
1203
1204 /* Resemble qdisc_graft behavior. */
1205 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1206 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1207 struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1208
1209 qdisc_refcount_inc(sch);
1210
1211 qdisc_put(old);
1212 }
1213}
1214
1215static void htb_attach(struct Qdisc *sch)
1216{
1217 struct htb_sched *q = qdisc_priv(sch);
1218
1219 if (q->offload)
1220 htb_attach_offload(sch);
1221 else
1222 htb_attach_software(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223}
1224
1225static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1226{
1227 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001228 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229 struct tc_htb_glob gopt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001231 if (q->offload)
1232 sch->flags |= TCQ_F_OFFLOADED;
1233 else
1234 sch->flags &= ~TCQ_F_OFFLOADED;
1235
Cong Wangb3624872019-05-04 11:43:42 -07001236 sch->qstats.overlimits = q->overlimits;
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001237 /* It's safe not to acquire the qdisc lock. As we hold RTNL,
1238 * no change can happen on the qdisc parameters.
1239 */
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001240
1241 gopt.direct_pkts = q->direct_pkts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001242 gopt.version = HTB_VER;
1243 gopt.rate2quantum = q->rate2quantum;
1244 gopt.defcls = q->defcls;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001245 gopt.debug = 0;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001246
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001247 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001248 if (nest == NULL)
1249 goto nla_put_failure;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001250 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1251 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
David S. Miller1b34ec42012-03-29 05:11:39 -04001252 goto nla_put_failure;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001253 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1254 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001255
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001256 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001257
Patrick McHardy1e904742008-01-22 22:11:17 -08001258nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001259 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001260 return -1;
1261}
1262
1263static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
Stephen Hemminger87990462006-08-10 23:35:16 -07001264 struct sk_buff *skb, struct tcmsg *tcm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001265{
Stephen Hemminger87990462006-08-10 23:35:16 -07001266 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001267 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001268 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001269 struct tc_htb_opt opt;
1270
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001271 /* It's safe not to acquire the qdisc lock. As we hold RTNL,
1272 * no change can happen on the class parameters.
1273 */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001274 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1275 tcm->tcm_handle = cl->common.classid;
Cong Wang11957be2018-09-07 13:29:14 -07001276 if (!cl->level && cl->leaf.q)
1277 tcm->tcm_info = cl->leaf.q->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001279 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001280 if (nest == NULL)
1281 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Stephen Hemminger87990462006-08-10 23:35:16 -07001283 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001284
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001285 psched_ratecfg_getrate(&opt.rate, &cl->rate);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001286 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001287 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001288 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001289 opt.quantum = cl->quantum;
1290 opt.prio = cl->prio;
Stephen Hemminger87990462006-08-10 23:35:16 -07001291 opt.level = cl->level;
David S. Miller1b34ec42012-03-29 05:11:39 -04001292 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1293 goto nla_put_failure;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001294 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1295 goto nla_put_failure;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001296 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001297 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1298 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001299 goto nla_put_failure;
1300 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001301 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1302 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001303 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001304
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001305 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001306
Patrick McHardy1e904742008-01-22 22:11:17 -08001307nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001308 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 return -1;
1310}
1311
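/* In offload mode an inner class keeps no counters of its own: sum the
 * bias counters (and, for leaves, the leaf qdisc counters) of every
 * class below @cl into @cl->bstats.
 */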
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001312static void htb_offload_aggregate_stats(struct htb_sched *q,
1313 struct htb_class *cl)
1314{
Ahmed S. Darwishf56940d2021-10-16 10:49:08 +02001315 u64 bytes = 0, packets = 0;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001316 struct htb_class *c;
1317 unsigned int i;
1318
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001319 gnet_stats_basic_sync_init(&cl->bstats);
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001320
1321 for (i = 0; i < q->clhash.hashsize; i++) {
1322 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1323 struct htb_class *p = c;
1324
1325 while (p && p->level < cl->level)
1326 p = p->parent;
1327
1328 if (p != cl)
1329 continue;
1330
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001331 bytes += u64_stats_read(&c->bstats_bias.bytes);
1332 packets += u64_stats_read(&c->bstats_bias.packets);
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001333 if (c->level == 0) {
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001334 bytes += u64_stats_read(&c->leaf.q->bstats.bytes);
1335 packets += u64_stats_read(&c->leaf.q->bstats.packets);
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001336 }
1337 }
1338 }
Ahmed S. Darwishf56940d2021-10-16 10:49:08 +02001339 _bstats_update(&cl->bstats, bytes, packets);
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001340}
1341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342static int
Stephen Hemminger87990462006-08-10 23:35:16 -07001343htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001344{
Stephen Hemminger87990462006-08-10 23:35:16 -07001345 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001346 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001347 struct gnet_stats_queue qs = {
1348 .drops = cl->drops,
Eric Dumazet3c75f6e2017-09-18 12:36:22 -07001349 .overlimits = cl->overlimits,
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001350 };
John Fastabend64015852014-09-28 11:53:57 -07001351 __u32 qlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001352
Paolo Abeni5dd431b2019-03-28 16:53:12 +01001353 if (!cl->level && cl->leaf.q)
1354 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1355
Konstantin Khlebnikov0564bf02016-07-16 17:08:56 +03001356 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1357 INT_MIN, INT_MAX);
1358 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1359 INT_MIN, INT_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001361 if (q->offload) {
1362 if (!cl->level) {
1363 if (cl->leaf.q)
1364 cl->bstats = cl->leaf.q->bstats;
1365 else
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001366 gnet_stats_basic_sync_init(&cl->bstats);
Ahmed S. Darwishf56940d2021-10-16 10:49:08 +02001367 _bstats_update(&cl->bstats,
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001368 u64_stats_read(&cl->bstats_bias.bytes),
1369 u64_stats_read(&cl->bstats_bias.packets));
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001370 } else {
1371 htb_offload_aggregate_stats(q, cl);
1372 }
1373 }
1374
Ahmed S. Darwish29cbcd82021-10-16 10:49:10 +02001375 if (gnet_stats_copy_basic(d, NULL, &cl->bstats, true) < 0 ||
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001376 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001377 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 return -1;
1379
1380 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1381}
1382
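/* Choose the TX queue for a new child: in software mode everything lives
 * on sch->dev_queue, in offload mode the driver is queried with
 * TC_HTB_LEAF_QUERY_QUEUE.
 */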
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001383static struct netdev_queue *
1384htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1385{
1386 struct net_device *dev = qdisc_dev(sch);
1387 struct tc_htb_qopt_offload offload_opt;
Maxim Mikityanskiy93bde212021-03-11 16:42:05 +02001388 struct htb_sched *q = qdisc_priv(sch);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001389 int err;
1390
Maxim Mikityanskiy93bde212021-03-11 16:42:05 +02001391 if (!q->offload)
1392 return sch->dev_queue;
1393
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001394 offload_opt = (struct tc_htb_qopt_offload) {
1395 .command = TC_HTB_LEAF_QUERY_QUEUE,
1396 .classid = TC_H_MIN(tcm->tcm_parent),
1397 };
1398 err = htb_offload(dev, &offload_opt);
1399 if (err || offload_opt.qid >= dev->num_tx_queues)
1400 return NULL;
1401 return netdev_get_tx_queue(dev, offload_opt.qid);
1402}
1403
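/* Graft @new_q onto @dev_queue with the device deactivated around the
 * switch, and return the qdisc that used to be attached there.
 */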
1404static struct Qdisc *
1405htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1406{
1407 struct net_device *dev = dev_queue->dev;
1408 struct Qdisc *old_q;
1409
1410 if (dev->flags & IFF_UP)
1411 dev_deactivate(dev);
1412 old_q = dev_graft_qdisc(dev_queue, new_q);
1413 if (new_q)
1414 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1415 if (dev->flags & IFF_UP)
1416 dev_activate(dev);
1417
1418 return old_q;
1419}
1420
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001421static struct netdev_queue *htb_offload_get_queue(struct htb_class *cl)
1422{
1423 struct netdev_queue *queue;
1424
1425 queue = cl->leaf.offload_queue;
1426 if (!(cl->leaf.q->flags & TCQ_F_BUILTIN))
1427 WARN_ON(cl->leaf.q->dev_queue != queue);
1428
1429 return queue;
1430}
1431
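/* The driver reassigned @cl_old to the TX queue that backed @cl_new:
 * move @cl_old's leaf qdisc over to that queue, quiescing the device
 * unless the whole qdisc tree is being destroyed anyway.
 */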
1432static void htb_offload_move_qdisc(struct Qdisc *sch, struct htb_class *cl_old,
1433 struct htb_class *cl_new, bool destroying)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001434{
1435 struct netdev_queue *queue_old, *queue_new;
1436 struct net_device *dev = qdisc_dev(sch);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001437
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001438 queue_old = htb_offload_get_queue(cl_old);
1439 queue_new = htb_offload_get_queue(cl_new);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001440
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001441 if (!destroying) {
1442 struct Qdisc *qdisc;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001443
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001444 if (dev->flags & IFF_UP)
1445 dev_deactivate(dev);
1446 qdisc = dev_graft_qdisc(queue_old, NULL);
1447 WARN_ON(qdisc != cl_old->leaf.q);
1448 }
1449
1450 if (!(cl_old->leaf.q->flags & TCQ_F_BUILTIN))
1451 cl_old->leaf.q->dev_queue = queue_new;
1452 cl_old->leaf.offload_queue = queue_new;
1453
1454 if (!destroying) {
1455 struct Qdisc *qdisc;
1456
1457 qdisc = dev_graft_qdisc(queue_new, cl_old->leaf.q);
1458 if (dev->flags & IFF_UP)
1459 dev_activate(dev);
1460 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1461 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001462}
1463
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
Alexander Aring653d6fd2017-12-20 12:35:17 -05001465 struct Qdisc **old, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001467 struct netdev_queue *dev_queue = sch->dev_queue;
Stephen Hemminger87990462006-08-10 23:35:16 -07001468 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001469 struct htb_sched *q = qdisc_priv(sch);
1470 struct Qdisc *old_q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001472 if (cl->level)
1473 return -EINVAL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001474
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001475 if (q->offload)
1476 dev_queue = htb_offload_get_queue(cl);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001477
1478 if (!new) {
1479 new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1480 cl->common.classid, extack);
1481 if (!new)
1482 return -ENOBUFS;
1483 }
1484
1485 if (q->offload) {
1486 htb_set_lockdep_class_child(new);
1487 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1488 qdisc_refcount_inc(new);
1489 old_q = htb_graft_helper(dev_queue, new);
1490 }
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001491
Cong Wang11957be2018-09-07 13:29:14 -07001492 *old = qdisc_replace(sch, new, &cl->leaf.q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001493
1494 if (q->offload) {
1495 WARN_ON(old_q != *old);
1496 qdisc_put(old_q);
1497 }
1498
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001499 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001500}
1501
Stephen Hemminger87990462006-08-10 23:35:16 -07001502static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503{
Stephen Hemminger87990462006-08-10 23:35:16 -07001504 struct htb_class *cl = (struct htb_class *)arg;
Cong Wang11957be2018-09-07 13:29:14 -07001505 return !cl->level ? cl->leaf.q : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506}
1507
Patrick McHardy256d61b2006-11-29 17:37:05 -08001508static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1509{
1510 struct htb_class *cl = (struct htb_class *)arg;
1511
Konstantin Khlebnikov95946652017-08-15 16:39:59 +03001512 htb_deactivate(qdisc_priv(sch), cl);
Patrick McHardy256d61b2006-11-29 17:37:05 -08001513}
1514
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001515static inline int htb_parent_last_child(struct htb_class *cl)
1516{
1517 if (!cl->parent)
1518 /* the root class */
1519 return 0;
Patrick McHardy42077592008-07-05 23:22:53 -07001520 if (cl->parent->children > 1)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001521 /* not the last child */
1522 return 0;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001523 return 1;
1524}
1525
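/* The last child is going away: turn @cl's parent back into a leaf,
 * attach @new_q (or noop_qdisc), and reset its tokens and mode.
 */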
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001526static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001527 struct Qdisc *new_q)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001528{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001529 struct htb_sched *q = qdisc_priv(sch);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001530 struct htb_class *parent = cl->parent;
1531
Cong Wang11957be2018-09-07 13:29:14 -07001532 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001533
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001534 if (parent->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001535 htb_safe_rb_erase(&parent->pq_node,
1536 &q->hlevel[parent->level].wait_pq);
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001537
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001538 parent->level = 0;
Cong Wang11957be2018-09-07 13:29:14 -07001539 memset(&parent->inner, 0, sizeof(parent->inner));
1540 parent->leaf.q = new_q ? new_q : &noop_qdisc;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001541 parent->tokens = parent->buffer;
1542 parent->ctokens = parent->cbuffer;
Eric Dumazetd2de8752014-08-22 18:32:09 -07001543 parent->t_c = ktime_get_ns();
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001544 parent->cmode = HTB_CAN_SEND;
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001545 if (q->offload)
1546 parent->leaf.offload_queue = cl->leaf.offload_queue;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001547}
1548
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001549static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1550 struct netdev_queue *dev_queue,
1551 struct Qdisc *new_q)
1552{
1553 struct Qdisc *old_q;
1554
1555 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
Yunjian Wang944d6712021-06-04 19:03:18 +08001556 if (new_q)
1557 qdisc_refcount_inc(new_q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001558 old_q = htb_graft_helper(dev_queue, new_q);
1559 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1560}
1561
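/* Undo the offload state of a leaf: detach its qdisc, fold its counters
 * into the parent's bias, and send TC_HTB_LEAF_DEL (or one of the
 * _LAST variants); the driver may respond by moving another class's
 * queue, which htb_offload_move_qdisc() then mirrors in software.
 */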
1562static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1563 bool last_child, bool destroying,
1564 struct netlink_ext_ack *extack)
1565{
1566 struct tc_htb_qopt_offload offload_opt;
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001567 struct netdev_queue *dev_queue;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001568 struct Qdisc *q = cl->leaf.q;
1569 struct Qdisc *old = NULL;
1570 int err;
1571
1572 if (cl->level)
1573 return -EINVAL;
1574
1575 WARN_ON(!q);
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001576 dev_queue = htb_offload_get_queue(cl);
1577 old = htb_graft_helper(dev_queue, NULL);
1578 if (destroying)
1579 /* Before HTB is destroyed, the kernel grafts noop_qdisc to
1580 * all queues.
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001581 */
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001582 WARN_ON(!(old->flags & TCQ_F_BUILTIN));
1583 else
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001584 WARN_ON(old != q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001585
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001586 if (cl->parent) {
Ahmed S. Darwishf56940d2021-10-16 10:49:08 +02001587 _bstats_update(&cl->parent->bstats_bias,
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001588 u64_stats_read(&q->bstats.bytes),
1589 u64_stats_read(&q->bstats.packets));
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001590 }
1591
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001592 offload_opt = (struct tc_htb_qopt_offload) {
1593 .command = !last_child ? TC_HTB_LEAF_DEL :
1594 destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1595 TC_HTB_LEAF_DEL_LAST,
1596 .classid = cl->common.classid,
1597 .extack = extack,
1598 };
1599 err = htb_offload(qdisc_dev(sch), &offload_opt);
1600
1601 if (!err || destroying)
1602 qdisc_put(old);
1603 else
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001604 htb_graft_helper(dev_queue, old);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001605
1606 if (last_child)
1607 return err;
1608
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001609 if (!err && offload_opt.classid != TC_H_MIN(cl->common.classid)) {
1610 u32 classid = TC_H_MAJ(sch->handle) |
1611 TC_H_MIN(offload_opt.classid);
1612 struct htb_class *moved_cl = htb_find(classid, sch);
1613
1614 htb_offload_move_qdisc(sch, moved_cl, cl, destroying);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001615 }
1616
1617 return err;
1618}
1619
Stephen Hemminger87990462006-08-10 23:35:16 -07001620static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001621{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622 if (!cl->level) {
Cong Wang11957be2018-09-07 13:29:14 -07001623 WARN_ON(!cl->leaf.q);
Vlad Buslov86bd4462018-09-24 19:22:50 +03001624 qdisc_put(cl->leaf.q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 }
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001626 gen_kill_estimator(&cl->rate_est);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001627 tcf_block_put(cl->block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 kfree(cl);
1629}
1630
Stephen Hemminger87990462006-08-10 23:35:16 -07001631static void htb_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001633 struct net_device *dev = qdisc_dev(sch);
1634 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635 struct htb_sched *q = qdisc_priv(sch);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001636 struct hlist_node *next;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001637 bool nonempty, changed;
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001638 struct htb_class *cl;
1639 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640
Jarek Poplawski12247362009-02-01 01:13:22 -08001641 cancel_work_sync(&q->work);
Patrick McHardyfb983d42007-03-16 01:22:39 -07001642 qdisc_watchdog_cancel(&q->watchdog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643 /* This line used to be after htb_destroy_class call below
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001644 * and surprisingly it worked in 2.4. But it must precede it
 1645 * because filters need their target class alive to be able to call
 1646 * unbind_filter on it (without an Oops).
1647 */
Jiri Pirko6529eab2017-05-17 11:07:55 +02001648 tcf_block_put(q->block);
Stephen Hemminger87990462006-08-10 23:35:16 -07001649
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001650 for (i = 0; i < q->clhash.hashsize; i++) {
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001651 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Jiri Pirko6529eab2017-05-17 11:07:55 +02001652 tcf_block_put(cl->block);
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001653 cl->block = NULL;
1654 }
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001655 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001656
1657 do {
1658 nonempty = false;
1659 changed = false;
1660 for (i = 0; i < q->clhash.hashsize; i++) {
1661 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1662 common.hnode) {
1663 bool last_child;
1664
1665 if (!q->offload) {
1666 htb_destroy_class(sch, cl);
1667 continue;
1668 }
1669
1670 nonempty = true;
1671
1672 if (cl->level)
1673 continue;
1674
1675 changed = true;
1676
1677 last_child = htb_parent_last_child(cl);
1678 htb_destroy_class_offload(sch, cl, last_child,
1679 true, NULL);
1680 qdisc_class_hash_remove(&q->clhash,
1681 &cl->common);
1682 if (cl->parent)
1683 cl->parent->children--;
1684 if (last_child)
1685 htb_parent_to_leaf(sch, cl, NULL);
1686 htb_destroy_class(sch, cl);
1687 }
1688 }
1689 } while (changed);
1690 WARN_ON(nonempty);
1691
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001692 qdisc_class_hash_destroy(&q->clhash);
Eric Dumazeta5a9f5342016-06-13 20:21:56 -07001693 __qdisc_reset_queue(&q->direct_queue);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001694
1695 if (!q->offload)
1696 return;
1697
1698 offload_opt = (struct tc_htb_qopt_offload) {
1699 .command = TC_HTB_DESTROY,
1700 };
1701 htb_offload(dev, &offload_opt);
1702
1703 if (!q->direct_qdiscs)
1704 return;
1705 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1706 qdisc_put(q->direct_qdiscs[i]);
1707 kfree(q->direct_qdiscs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708}
1709
Maxim Mikityanskiy4dd78a72021-01-19 14:08:12 +02001710static int htb_delete(struct Qdisc *sch, unsigned long arg,
1711 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712{
1713 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001714 struct htb_class *cl = (struct htb_class *)arg;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001715 struct Qdisc *new_q = NULL;
1716 int last_child = 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001717 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
Yang Yinglianga071d272013-12-23 17:38:59 +08001719 /* TODO: why don't we allow deleting a subtree? References? Does the
 1720 * tc subsystem guarantee us that in htb_destroy it holds no class
 1721 * refs, so that we can remove children safely there?
1722 */
Patrick McHardy42077592008-07-05 23:22:53 -07001723 if (cl->children || cl->filter_cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724 return -EBUSY;
Stephen Hemminger87990462006-08-10 23:35:16 -07001725
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001726 if (!cl->level && htb_parent_last_child(cl))
1727 last_child = 1;
1728
1729 if (q->offload) {
1730 err = htb_destroy_class_offload(sch, cl, last_child, false,
1731 extack);
1732 if (err)
1733 return err;
1734 }
1735
1736 if (last_child) {
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001737 struct netdev_queue *dev_queue = sch->dev_queue;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001738
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001739 if (q->offload)
1740 dev_queue = htb_offload_get_queue(cl);
1741
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001742 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001743 cl->parent->common.classid,
1744 NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001745 if (q->offload) {
Yunjian Wang944d6712021-06-04 19:03:18 +08001746 if (new_q)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001747 htb_set_lockdep_class_child(new_q);
Yunjian Wang944d6712021-06-04 19:03:18 +08001748 htb_parent_to_leaf_offload(sch, dev_queue, new_q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001749 }
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001750 }
1751
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 sch_tree_lock(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001753
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001754 if (!cl->level)
1755 qdisc_purge_queue(cl->leaf.q);
Patrick McHardy814a175e2006-11-29 17:34:50 -08001756
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001757 /* delete from hash and active; remainder in destroy_class */
1758 qdisc_class_hash_remove(&q->clhash, &cl->common);
Jarek Poplawski26b284d2008-08-13 15:16:43 -07001759 if (cl->parent)
1760 cl->parent->children--;
Patrick McHardyc38c83c2007-03-27 14:04:24 -07001761
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 if (cl->prio_activity)
Stephen Hemminger87990462006-08-10 23:35:16 -07001763 htb_deactivate(q, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001765 if (cl->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001766 htb_safe_rb_erase(&cl->pq_node,
1767 &q->hlevel[cl->level].wait_pq);
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001768
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001769 if (last_child)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001770 htb_parent_to_leaf(sch, cl, new_q);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001771
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 sch_tree_unlock(sch);
WANG Cong143976c2017-08-24 16:51:29 -07001773
1774 htb_destroy_class(sch, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001775 return 0;
1776}
1777
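/* Create a new class or change an existing one. In offload mode a new
 * leaf either gets its own TX queue (TC_HTB_LEAF_ALLOC_QUEUE) or, as the
 * first child, converts its parent from a leaf into an inner node
 * (TC_HTB_LEAF_TO_INNER).
 */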
Stephen Hemminger87990462006-08-10 23:35:16 -07001778static int htb_change_class(struct Qdisc *sch, u32 classid,
Patrick McHardy1e904742008-01-22 22:11:17 -08001779 u32 parentid, struct nlattr **tca,
Alexander Aring793d81d2017-12-20 12:35:15 -05001780 unsigned long *arg, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781{
1782 int err = -EINVAL;
1783 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001784 struct htb_class *cl = (struct htb_class *)*arg, *parent;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001785 struct tc_htb_qopt_offload offload_opt;
Patrick McHardy1e904742008-01-22 22:11:17 -08001786 struct nlattr *opt = tca[TCA_OPTIONS];
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001787 struct nlattr *tb[TCA_HTB_MAX + 1];
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001788 struct Qdisc *parent_qdisc = NULL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001789 struct netdev_queue *dev_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790 struct tc_htb_opt *hopt;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001791 u64 rate64, ceil64;
Li RongQingda01ec42018-03-30 10:11:21 +08001792 int warn = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001793
1794 /* extract all subattrs from opt attr */
Patrick McHardycee63722008-01-23 20:33:32 -08001795 if (!opt)
1796 goto failure;
1797
Johannes Berg8cb08172019-04-26 14:07:28 +02001798 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1799 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001800 if (err < 0)
1801 goto failure;
1802
1803 err = -EINVAL;
Patrick McHardy27a34212008-01-23 20:35:39 -08001804 if (tb[TCA_HTB_PARMS] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806
Stephen Hemminger87990462006-08-10 23:35:16 -07001807 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001808
Patrick McHardy1e904742008-01-22 22:11:17 -08001809 hopt = nla_data(tb[TCA_HTB_PARMS]);
Eric Dumazet196d97f2012-11-05 16:40:49 +00001810 if (!hopt->rate.rate || !hopt->ceil.rate)
Stephen Hemminger87990462006-08-10 23:35:16 -07001811 goto failure;
1812
Maxim Mikityanskiy429c3be2022-01-25 12:06:54 +02001813 if (q->offload) {
1814 /* Options not supported by the offload. */
1815 if (hopt->rate.overhead || hopt->ceil.overhead) {
1816 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the overhead parameter");
1817 goto failure;
1818 }
1819 if (hopt->rate.mpu || hopt->ceil.mpu) {
1820 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the mpu parameter");
1821 goto failure;
1822 }
1823 if (hopt->quantum) {
1824 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the quantum parameter");
1825 goto failure;
1826 }
1827 if (hopt->prio) {
1828 NL_SET_ERR_MSG(extack, "HTB offload doesn't support the prio parameter");
1829 goto failure;
1830 }
1831 }
1832
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001833 /* Keeping backward compatible with rate_table based iproute2 tc */
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001834 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001835 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1836 NULL));
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001837
1838 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001839 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1840 NULL));
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001841
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001842 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1843 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1844
Stephen Hemminger87990462006-08-10 23:35:16 -07001845 if (!cl) { /* new class */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001846 struct net_device *dev = qdisc_dev(sch);
1847 struct Qdisc *new_q, *old_q;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001848 int prio;
Patrick McHardyee39e102007-07-02 22:48:13 -07001849 struct {
Patrick McHardy1e904742008-01-22 22:11:17 -08001850 struct nlattr nla;
Patrick McHardyee39e102007-07-02 22:48:13 -07001851 struct gnet_estimator opt;
1852 } est = {
Patrick McHardy1e904742008-01-22 22:11:17 -08001853 .nla = {
1854 .nla_len = nla_attr_size(sizeof(est.opt)),
1855 .nla_type = TCA_RATE,
Patrick McHardyee39e102007-07-02 22:48:13 -07001856 },
1857 .opt = {
1858 /* 4s interval, 16s averaging constant */
1859 .interval = 2,
1860 .ewma_log = 2,
1861 },
1862 };
Stephen Hemminger3696f622006-08-10 23:36:01 -07001863
Linus Torvalds1da177e2005-04-16 15:20:36 -07001864 /* check for valid classid */
Joe Perchesf64f9e72009-11-29 16:55:45 -08001865 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1866 htb_find(classid, sch))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001867 goto failure;
1868
1869 /* check maximal depth */
1870 if (parent && parent->parent && parent->parent->level < 2) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001871 pr_err("htb: tree is too deep\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 goto failure;
1873 }
1874 err = -ENOBUFS;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001875 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1876 if (!cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877 goto failure;
Stephen Hemminger87990462006-08-10 23:35:16 -07001878
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001879 gnet_stats_basic_sync_init(&cl->bstats);
1880 gnet_stats_basic_sync_init(&cl->bstats_bias);
Ahmed S. Darwish67c9e62702021-10-16 10:49:07 +02001881
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001882 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001883 if (err) {
1884 kfree(cl);
1885 goto failure;
1886 }
Eric Dumazet64153ce2013-06-06 14:53:16 -07001887 if (htb_rate_est || tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001888 err = gen_new_estimator(&cl->bstats, NULL,
1889 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001890 NULL,
Ahmed S. Darwish29cbcd82021-10-16 10:49:10 +02001891 true,
Eric Dumazet64153ce2013-06-06 14:53:16 -07001892 tca[TCA_RATE] ? : &est.nla);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001893 if (err)
1894 goto err_block_put;
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001895 }
1896
Patrick McHardy42077592008-07-05 23:22:53 -07001897 cl->children = 0;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001898 RB_CLEAR_NODE(&cl->pq_node);
1899
1900 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1901 RB_CLEAR_NODE(&cl->node[prio]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001902
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001903 cl->common.classid = classid;
1904
 1905 /* Make sure nothing interrupts us in between two
1906 * ndo_setup_tc calls.
1907 */
1908 ASSERT_RTNL();
1909
Linus Torvalds1da177e2005-04-16 15:20:36 -07001910 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001911 * so it can't be used inside of sch_tree_lock
1912 * -- thanks to Karlis Peisenieks
1913 */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001914 if (!q->offload) {
1915 dev_queue = sch->dev_queue;
1916 } else if (!(parent && !parent->level)) {
1917 /* Assign a dev_queue to this classid. */
1918 offload_opt = (struct tc_htb_qopt_offload) {
1919 .command = TC_HTB_LEAF_ALLOC_QUEUE,
1920 .classid = cl->common.classid,
1921 .parent_classid = parent ?
1922 TC_H_MIN(parent->common.classid) :
1923 TC_HTB_CLASSID_ROOT,
1924 .rate = max_t(u64, hopt->rate.rate, rate64),
1925 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1926 .extack = extack,
1927 };
1928 err = htb_offload(dev, &offload_opt);
1929 if (err) {
1930 pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
1931 err);
1932 goto err_kill_estimator;
1933 }
1934 dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1935 } else { /* First child. */
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001936 dev_queue = htb_offload_get_queue(parent);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001937 old_q = htb_graft_helper(dev_queue, NULL);
1938 WARN_ON(old_q != parent->leaf.q);
1939 offload_opt = (struct tc_htb_qopt_offload) {
1940 .command = TC_HTB_LEAF_TO_INNER,
1941 .classid = cl->common.classid,
1942 .parent_classid =
1943 TC_H_MIN(parent->common.classid),
1944 .rate = max_t(u64, hopt->rate.rate, rate64),
1945 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1946 .extack = extack,
1947 };
1948 err = htb_offload(dev, &offload_opt);
1949 if (err) {
1950 pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
1951 err);
1952 htb_graft_helper(dev_queue, old_q);
1953 goto err_kill_estimator;
1954 }
Ahmed S. Darwishf56940d2021-10-16 10:49:08 +02001955 _bstats_update(&parent->bstats_bias,
Ahmed S. Darwish50dc9a82021-10-16 10:49:09 +02001956 u64_stats_read(&old_q->bstats.bytes),
1957 u64_stats_read(&old_q->bstats.packets));
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001958 qdisc_put(old_q);
1959 }
1960 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001961 classid, NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001962 if (q->offload) {
1963 if (new_q) {
1964 htb_set_lockdep_class_child(new_q);
1965 /* One ref for cl->leaf.q, the other for
1966 * dev_queue->qdisc.
1967 */
1968 qdisc_refcount_inc(new_q);
1969 }
1970 old_q = htb_graft_helper(dev_queue, new_q);
1971 /* No qdisc_put needed. */
1972 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1973 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974 sch_tree_lock(sch);
1975 if (parent && !parent->level) {
1976 /* turn parent into inner node */
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001977 qdisc_purge_queue(parent->leaf.q);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001978 parent_qdisc = parent->leaf.q;
Stephen Hemminger87990462006-08-10 23:35:16 -07001979 if (parent->prio_activity)
1980 htb_deactivate(q, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981
1982 /* remove from evt list because of level change */
1983 if (parent->cmode != HTB_CAN_SEND) {
Eric Dumazetc9364632013-06-15 03:30:10 -07001984 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 parent->cmode = HTB_CAN_SEND;
1986 }
1987 parent->level = (parent->parent ? parent->parent->level
Stephen Hemminger87990462006-08-10 23:35:16 -07001988 : TC_HTB_MAXDEPTH) - 1;
Cong Wang11957be2018-09-07 13:29:14 -07001989 memset(&parent->inner, 0, sizeof(parent->inner));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001991
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992 /* leaf (we) needs elementary qdisc */
Cong Wang11957be2018-09-07 13:29:14 -07001993 cl->leaf.q = new_q ? new_q : &noop_qdisc;
Maxim Mikityanskiyca49bfd2021-08-26 14:54:25 +03001994 if (q->offload)
1995 cl->leaf.offload_queue = dev_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001996
Stephen Hemminger87990462006-08-10 23:35:16 -07001997 cl->parent = parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998
1999 /* set class to be in HTB_CAN_SEND state */
Jiri Pirkob9a7afd2013-02-12 00:12:02 +00002000 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
2001 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
Eric Dumazet5343a7f2013-06-04 07:11:48 +00002002 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
Eric Dumazetd2de8752014-08-22 18:32:09 -07002003 cl->t_c = ktime_get_ns();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004 cl->cmode = HTB_CAN_SEND;
2005
2006 /* attach to the hash list and parent's family */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002007 qdisc_class_hash_insert(&q->clhash, &cl->common);
Patrick McHardy42077592008-07-05 23:22:53 -07002008 if (parent)
2009 parent->children++;
Cong Wang11957be2018-09-07 13:29:14 -07002010 if (cl->leaf.q != &noop_qdisc)
2011 qdisc_hash_add(cl->leaf.q, true);
Patrick McHardyee39e102007-07-02 22:48:13 -07002012 } else {
Stephen Hemminger71bcb092008-11-25 21:13:31 -08002013 if (tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07002014 err = gen_replace_estimator(&cl->bstats, NULL,
2015 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07002016 NULL,
Ahmed S. Darwish29cbcd82021-10-16 10:49:10 +02002017 true,
Stephen Hemminger71bcb092008-11-25 21:13:31 -08002018 tca[TCA_RATE]);
2019 if (err)
2020 return err;
2021 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002022
2023 if (q->offload) {
2024 struct net_device *dev = qdisc_dev(sch);
2025
2026 offload_opt = (struct tc_htb_qopt_offload) {
2027 .command = TC_HTB_NODE_MODIFY,
2028 .classid = cl->common.classid,
2029 .rate = max_t(u64, hopt->rate.rate, rate64),
2030 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
2031 .extack = extack,
2032 };
2033 err = htb_offload(dev, &offload_opt);
2034 if (err)
2035 /* Estimator was replaced, and rollback may fail
2036 * as well, so we don't try to recover it, and
 2037 * the estimator won't work properly with the
2038 * offload anyway, because bstats are updated
2039 * only when the stats are queried.
2040 */
2041 return err;
2042 }
2043
Stephen Hemminger87990462006-08-10 23:35:16 -07002044 sch_tree_lock(sch);
Patrick McHardyee39e102007-07-02 22:48:13 -07002045 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046
Yang Yingliang1598f7c2013-12-10 14:59:28 +08002047 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
2048 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
2049
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050 /* There used to be a nasty bug here: we have to check that the node
Cong Wang11957be2018-09-07 13:29:14 -07002051 * is really a leaf before changing cl->leaf!
Eric Dumazetcc7ec452011-01-19 19:26:56 +00002052 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002053 if (!cl->level) {
Yang Yingliang1598f7c2013-12-10 14:59:28 +08002054 u64 quantum = cl->rate.rate_bytes_ps;
2055
2056 do_div(quantum, q->rate2quantum);
2057 cl->quantum = min_t(u64, quantum, INT_MAX);
2058
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002059 if (!hopt->quantum && cl->quantum < 1000) {
Li RongQingda01ec42018-03-30 10:11:21 +08002060 warn = -1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002061 cl->quantum = 1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 }
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002063 if (!hopt->quantum && cl->quantum > 200000) {
Li RongQingda01ec42018-03-30 10:11:21 +08002064 warn = 1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002065 cl->quantum = 200000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 }
2067 if (hopt->quantum)
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08002068 cl->quantum = hopt->quantum;
2069 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
2070 cl->prio = TC_HTB_NUMPRIO - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071 }
2072
Jiri Pirko324f5aa2013-02-12 00:11:59 +00002073 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
Vimalkumarf3ad8572013-09-10 17:36:37 -07002074 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
Vimalkumar56b765b2012-10-31 06:04:11 +00002075
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076 sch_tree_unlock(sch);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03002077 qdisc_put(parent_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
Li RongQingda01ec42018-03-30 10:11:21 +08002079 if (warn)
2080 pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
2081 cl->common.classid, (warn == -1 ? "small" : "big"));
2082
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002083 qdisc_class_hash_grow(sch, &q->clhash);
2084
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 *arg = (unsigned long)cl;
2086 return 0;
2087
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002088err_kill_estimator:
2089 gen_kill_estimator(&cl->rate_est);
2090err_block_put:
2091 tcf_block_put(cl->block);
2092 kfree(cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093failure:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 return err;
2095}
2096
Alexander Aringcbaacc42017-12-20 12:35:16 -05002097static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
2098 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099{
2100 struct htb_sched *q = qdisc_priv(sch);
2101 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002102
Jiri Pirko6529eab2017-05-17 11:07:55 +02002103 return cl ? cl->block : q->block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002104}
2105
2106static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
Stephen Hemminger87990462006-08-10 23:35:16 -07002107 u32 classid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108{
Stephen Hemminger87990462006-08-10 23:35:16 -07002109 struct htb_class *cl = htb_find(classid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002110
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 /*if (cl && !cl->level) return 0;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00002112 * The line above used to be there to prevent attaching filters to
 2113 * leaves. But at least the tc_index filter uses this just to get the
 2114 * class for other reasons, so we have to allow for it.
 2115 * ----
 2116 * 19.6.2002: As Werner explained, it is OK - bind filter is just
 2117 * another way to "lock" the class - unlike "get", this lock can
 2118 * be broken by the class during destroy, IIUC.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 */
Stephen Hemminger87990462006-08-10 23:35:16 -07002120 if (cl)
2121 cl->filter_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 return (unsigned long)cl;
2123}
2124
2125static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2126{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002128
Stephen Hemminger87990462006-08-10 23:35:16 -07002129 if (cl)
2130 cl->filter_cnt--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131}
2132
2133static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2134{
2135 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002136 struct htb_class *cl;
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002137 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
2139 if (arg->stop)
2140 return;
2141
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002142 for (i = 0; i < q->clhash.hashsize; i++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08002143 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 if (arg->count < arg->skip) {
2145 arg->count++;
2146 continue;
2147 }
2148 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2149 arg->stop = 1;
2150 return;
2151 }
2152 arg->count++;
2153 }
2154 }
2155}
2156
Eric Dumazet20fea082007-11-14 01:44:41 -08002157static const struct Qdisc_class_ops htb_class_ops = {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002158 .select_queue = htb_select_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 .graft = htb_graft,
2160 .leaf = htb_leaf,
Patrick McHardy256d61b2006-11-29 17:37:05 -08002161 .qlen_notify = htb_qlen_notify,
WANG Cong143976c2017-08-24 16:51:29 -07002162 .find = htb_search,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002163 .change = htb_change_class,
2164 .delete = htb_delete,
2165 .walk = htb_walk,
Jiri Pirko6529eab2017-05-17 11:07:55 +02002166 .tcf_block = htb_tcf_block,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002167 .bind_tcf = htb_bind_filter,
2168 .unbind_tcf = htb_unbind_filter,
2169 .dump = htb_dump_class,
2170 .dump_stats = htb_dump_class_stats,
2171};
2172
Eric Dumazet20fea082007-11-14 01:44:41 -08002173static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 .cl_ops = &htb_class_ops,
2175 .id = "htb",
2176 .priv_size = sizeof(struct htb_sched),
2177 .enqueue = htb_enqueue,
2178 .dequeue = htb_dequeue,
Jarek Poplawski77be1552008-10-31 00:47:01 -07002179 .peek = qdisc_peek_dequeued,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 .init = htb_init,
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002181 .attach = htb_attach,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182 .reset = htb_reset,
2183 .destroy = htb_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 .dump = htb_dump,
2185 .owner = THIS_MODULE,
2186};
2187
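/* A minimal usage sketch from userspace, assuming iproute2's tc (the
 * offload path additionally needs a NIC and driver with HTB offload
 * support and the hw-tc-offload feature enabled):
 *
 *   tc qdisc add dev eth0 root handle 1: htb default 10
 *   tc class add dev eth0 parent 1: classid 1:1 htb rate 100mbit ceil 100mbit
 *   tc class add dev eth0 parent 1:1 classid 1:10 htb rate 30mbit ceil 100mbit
 */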
2188static int __init htb_module_init(void)
2189{
Stephen Hemminger87990462006-08-10 23:35:16 -07002190 return register_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191}
Stephen Hemminger87990462006-08-10 23:35:16 -07002192static void __exit htb_module_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193{
Stephen Hemminger87990462006-08-10 23:35:16 -07002194 unregister_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195}
Stephen Hemminger87990462006-08-10 23:35:16 -07002196
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197module_init(htb_module_init)
2198module_exit(htb_module_exit)
2199MODULE_LICENSE("GPL");