// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_htb.c	Hierarchical token bucket, feed tree version
 *
 * Authors:	Martin Devera, <devik@cdi.cz>
 *
 * Credits (in time order) for older HTB versions:
 *		Stef Coene <stef.coene@docum.org>
 *			HTB support at LARTC mailing list
 *		Ondrej Kraus, <krauso@barr.cz>
 *			found missing INIT_QDISC(htb)
 *		Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 *			helped a lot to locate nasty class stall bug
 *		Andi Kleen, Jamal Hadi, Bert Hubert
 *			code review and helpful comments on shaping
 *		Tomasz Wrona, <tw@eter.tym.pl>
 *			created test case so that I was able to fix nasty bug
 *		Wilfried Weissmann
 *			spotted bug in dequeue code and helped with fix
 *		Jiri Fojtasek
 *			fixed requeue routine
 *		and many others. thanks.
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/netlink.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>

/* HTB algorithm.
 Author: devik@cdi.cz
 ========================================================================
 HTB is like TBF with multiple classes. It is also similar to CBQ because
 it allows assigning a priority to each class in the hierarchy.
 In fact it is another implementation of Floyd's formal sharing.

 Levels:
 Each class is assigned a level. Leaves ALWAYS have level 0 and root
 classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
 one less than their parent.
*/
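
/* Illustrative note (not from the original source): with standard tc(8)
 * syntax and a hypothetical device eth0, a small hierarchy such as
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20
 *	tc class add dev eth0 parent 1:  classid 1:1  htb rate 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:10 htb rate 30mbit ceil 100mbit
 *	tc class add dev eth0 parent 1:1 classid 1:20 htb rate 70mbit ceil 100mbit
 *
 * gives the leaves 1:10 and 1:20 level 0, with their parent 1:1 one level
 * above them; unclassified traffic falls back to 1:20 via "default 20".
 */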

static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
#define HTB_VER 0x30011		/* major must be matched with number supplied by TC as version */

#if HTB_VER >> 16 != TC_HTB_PROTOVER
#error "Mismatched sch_htb.c and pkt_sch.h"
#endif

/* Module parameter and sysfs export */
module_param(htb_hysteresis, int, 0640);
MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");

static int htb_rate_est = 0; /* htb classes have a default rate estimator */
module_param(htb_rate_est, int, 0640);
MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
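/* Note (assumption, based on the standard module_param sysfs layout): both
 * parameters above are also visible at run time, e.g. under
 * /sys/module/sch_htb/parameters/htb_hysteresis, and with mode 0640 they
 * are writable by root without reloading the module.
 */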

/* used internally to keep the status of a single class */
enum htb_cmode {
	HTB_CANT_SEND,		/* class can't send and can't borrow */
	HTB_MAY_BORROW,		/* class can't send but may borrow */
	HTB_CAN_SEND		/* class can send */
};

struct htb_prio {
	union {
		struct rb_root row;
		struct rb_root feed;
	};
	struct rb_node *ptr;
	/* When class changes from state 1->2 and disconnects from
	 * parent's feed then we lose the ptr value and start from the
	 * first child again. Here we store the classid of the
	 * last valid ptr (used when ptr is NULL).
	 */
	u32 last_ptr_id;
};

/* interior & leaf nodes; props specific to leaves are marked L:
 * To reduce false sharing, place mostly read fields at beginning,
 * and mostly written ones at the end.
 */
struct htb_class {
	struct Qdisc_class_common common;
	struct psched_ratecfg rate;
	struct psched_ratecfg ceil;
	s64 buffer, cbuffer;	/* token bucket depth/rate */
	s64 mbuffer;		/* max wait time */
	u32 prio;		/* these two are used only by leaves... */
	int quantum;		/* ...but stored for parent-to-leaf return */

	struct tcf_proto __rcu *filter_list;	/* class attached filters */
	struct tcf_block *block;
	int filter_cnt;

	int level;		/* our level (see above) */
	unsigned int children;
	struct htb_class *parent;	/* parent class */

	struct net_rate_estimator __rcu *rate_est;

	/*
	 * Written often fields
	 */
	struct gnet_stats_basic_packed bstats;
	struct gnet_stats_basic_packed bstats_bias;
	struct tc_htb_xstats xstats;	/* our special stats */

	/* token bucket parameters */
	s64 tokens, ctokens;	/* current number of tokens */
	s64 t_c;		/* checkpoint time */

	union {
		struct htb_class_leaf {
			int deficit[TC_HTB_MAXDEPTH];
			struct Qdisc *q;
		} leaf;
		struct htb_class_inner {
			struct htb_prio clprio[TC_HTB_NUMPRIO];
		} inner;
	};
	s64 pq_key;

	int prio_activity;	/* for which prios are we active */
	enum htb_cmode cmode;	/* current mode of the class */
	struct rb_node pq_node;	/* node for event queue */
	struct rb_node node[TC_HTB_NUMPRIO];	/* node for self or feed tree */

	unsigned int drops ____cacheline_aligned_in_smp;
	unsigned int overlimits;
};

struct htb_level {
	struct rb_root wait_pq;
	struct htb_prio hprio[TC_HTB_NUMPRIO];
};

struct htb_sched {
	struct Qdisc_class_hash clhash;
	int defcls;		/* class where unclassified flows go to */
	int rate2quantum;	/* quant = rate / rate2quantum */

	/* filters for qdisc itself */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;

#define HTB_WARN_TOOMANYEVENTS	0x1
	unsigned int warned;	/* only one warning */
	int direct_qlen;
	struct work_struct work;

	/* non shaped skbs; let them go directly thru */
	struct qdisc_skb_head direct_queue;
	u32 direct_pkts;
	u32 overlimits;

	struct qdisc_watchdog watchdog;

	s64 now;	/* cached dequeue time */

	/* time of nearest event per level (row) */
	s64 near_ev_cache[TC_HTB_MAXDEPTH];

	int row_mask[TC_HTB_MAXDEPTH];

	struct htb_level hlevel[TC_HTB_MAXDEPTH];

	struct Qdisc **direct_qdiscs;
	unsigned int num_direct_qdiscs;

	bool offload;
};

/* find class in global hash table using given handle */
static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct Qdisc_class_common *clc;

	clc = qdisc_class_find(&q->clhash, handle);
	if (clc == NULL)
		return NULL;
	return container_of(clc, struct htb_class, common);
}

static unsigned long htb_search(struct Qdisc *sch, u32 handle)
{
	return (unsigned long)htb_find(handle, sch);
}

/**
 * htb_classify - classify a packet into class
 *
 * It returns NULL if the packet should be dropped or -1 if the packet
 * should be passed directly thru. In all other cases leaf class is returned.
 * We allow direct class selection by classid in priority. Then we examine
 * filters in qdisc and in inner nodes (if higher filter points to the inner
 * node). If we end up with classid MAJOR:0 we enqueue the skb into special
 * internal fifo (direct). These packets then go directly thru. If we still
 * have no valid leaf we try to use MAJOR:default leaf. If that is still
 * unsuccessful then we finish and return the direct queue.
 */
#define HTB_DIRECT ((struct htb_class *)-1L)

static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	struct tcf_result res;
	struct tcf_proto *tcf;
	int result;

	/* allow to select class by setting skb->priority to valid classid;
	 * note that nfmark can be used too by attaching filter fw with no
	 * rules in it
	 */
	if (skb->priority == sch->handle)
		return HTB_DIRECT;	/* X:0 (direct flow) selected */
	cl = htb_find(skb->priority, sch);
	if (cl) {
		if (cl->level == 0)
			return cl;
		/* Start with inner filter chain if a non-leaf class is selected */
		tcf = rcu_dereference_bh(cl->filter_list);
	} else {
		tcf = rcu_dereference_bh(q->filter_list);
	}

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	while (tcf && (result = tcf_classify(skb, tcf, &res, false)) >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			fallthrough;
		case TC_ACT_SHOT:
			return NULL;
		}
#endif
		cl = (void *)res.class;
		if (!cl) {
			if (res.classid == sch->handle)
				return HTB_DIRECT;	/* X:0 (direct flow) */
			cl = htb_find(res.classid, sch);
			if (!cl)
				break;	/* filter selected invalid classid */
		}
		if (!cl->level)
			return cl;	/* we hit leaf; return it */

		/* we have got inner class; apply inner filter chain */
		tcf = rcu_dereference_bh(cl->filter_list);
	}
	/* classification failed; try to use default class */
	cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
	if (!cl || cl->level)
		return HTB_DIRECT;	/* bad default; this is a safe bet */
	return cl;
}

/**
 * htb_add_to_id_tree - adds class to the round robin list
 *
 * Routine adds class to the list (actually tree) sorted by classid.
 * Make sure that class is not already on such list for given prio.
 */
static void htb_add_to_id_tree(struct rb_root *root,
			       struct htb_class *cl, int prio)
{
	struct rb_node **p = &root->rb_node, *parent = NULL;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, node[prio]);

		if (cl->common.classid > c->common.classid)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->node[prio], parent, p);
	rb_insert_color(&cl->node[prio], root);
}

/**
 * htb_add_to_wait_tree - adds class to the event queue with delay
 *
 * The class is added to priority event queue to indicate that class will
 * change its mode in cl->pq_key nanoseconds. Make sure that class is not
 * already in the queue.
 */
static void htb_add_to_wait_tree(struct htb_sched *q,
				 struct htb_class *cl, s64 delay)
{
	struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;

	cl->pq_key = q->now + delay;
	if (cl->pq_key == q->now)
		cl->pq_key++;

	/* update the nearest event cache */
	if (q->near_ev_cache[cl->level] > cl->pq_key)
		q->near_ev_cache[cl->level] = cl->pq_key;

	while (*p) {
		struct htb_class *c;
		parent = *p;
		c = rb_entry(parent, struct htb_class, pq_node);
		if (cl->pq_key >= c->pq_key)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}
	rb_link_node(&cl->pq_node, parent, p);
	rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
}

/**
 * htb_next_rb_node - finds next node in binary tree
 *
 * When we are past the last key, the iterator is set to NULL.
 * Average complexity is 2 steps per call.
 */
static inline void htb_next_rb_node(struct rb_node **n)
{
	*n = rb_next(*n);
}

/**
 * htb_add_class_to_row - add class to its row
 *
 * The class is added to row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_add_class_to_row(struct htb_sched *q,
					struct htb_class *cl, int mask)
{
	q->row_mask[cl->level] |= mask;
	while (mask) {
		int prio = ffz(~mask);
		mask &= ~(1 << prio);
		htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
	}
}

/* If this triggers, it is a bug in this code, but it need not be fatal */
static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
{
	if (RB_EMPTY_NODE(rb)) {
		WARN_ON(1);
	} else {
		rb_erase(rb, root);
		RB_CLEAR_NODE(rb);
	}
}

/**
 * htb_remove_class_from_row - removes class from its row
 *
 * The class is removed from row at priorities marked in mask.
 * It does nothing if mask == 0.
 */
static inline void htb_remove_class_from_row(struct htb_sched *q,
					     struct htb_class *cl, int mask)
{
	int m = 0;
	struct htb_level *hlevel = &q->hlevel[cl->level];

	while (mask) {
		int prio = ffz(~mask);
		struct htb_prio *hprio = &hlevel->hprio[prio];

		mask &= ~(1 << prio);
		if (hprio->ptr == cl->node + prio)
			htb_next_rb_node(&hprio->ptr);

		htb_safe_rb_erase(cl->node + prio, &hprio->row);
		if (!hprio->row.rb_node)
			m |= 1 << prio;
	}
	q->row_mask[cl->level] &= ~m;
}

/**
 * htb_activate_prios - creates active class's feed chain
 *
 * The class is connected to ancestors and/or appropriate rows
 * for the priorities it participates in. cl->cmode must be the new
 * (activated) mode. It does nothing if cl->prio_activity == 0.
 */
static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].feed.rb_node)
				/* parent already has its feed in use, so
				 * reset the bit in mask as parent is already ok
				 */
				mask &= ~(1 << prio);

			htb_add_to_id_tree(&p->inner.clprio[prio].feed, cl, prio);
		}
		p->prio_activity |= mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_add_class_to_row(q, cl, mask);
}

/**
 * htb_deactivate_prios - remove class from feed chain
 *
 * cl->cmode must represent old mode (before deactivation). It does
 * nothing if cl->prio_activity == 0. Class is removed from all feed
 * chains and rows.
 */
static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
{
	struct htb_class *p = cl->parent;
	long m, mask = cl->prio_activity;

	while (cl->cmode == HTB_MAY_BORROW && p && mask) {
		m = mask;
		mask = 0;
		while (m) {
			int prio = ffz(~m);
			m &= ~(1 << prio);

			if (p->inner.clprio[prio].ptr == cl->node + prio) {
				/* we are removing child which is pointed to from
				 * parent feed - forget the pointer but remember
				 * classid
				 */
				p->inner.clprio[prio].last_ptr_id = cl->common.classid;
				p->inner.clprio[prio].ptr = NULL;
			}

			htb_safe_rb_erase(cl->node + prio,
					  &p->inner.clprio[prio].feed);

			if (!p->inner.clprio[prio].feed.rb_node)
				mask |= 1 << prio;
		}

		p->prio_activity &= ~mask;
		cl = p;
		p = cl->parent;
	}
	if (cl->cmode == HTB_CAN_SEND && mask)
		htb_remove_class_from_row(q, cl, mask);
}

static inline s64 htb_lowater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
	else
		return 0;
}

static inline s64 htb_hiwater(const struct htb_class *cl)
{
	if (htb_hysteresis)
		return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
	else
		return 0;
}

/**
 * htb_class_mode - computes and returns current class mode
 *
 * It computes cl's mode at time cl->t_c+diff and returns it. If mode
 * is not HTB_CAN_SEND then cl->pq_key is updated to time difference
 * from now to time when cl will change its state.
 * Also it is worth noting that class mode doesn't change simply
 * at cl->{c,}tokens == 0; rather there can be a hysteresis in the
 * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 * mode transitions per time unit. The speed gain is about 1/6.
 */
static inline enum htb_cmode
htb_class_mode(struct htb_class *cl, s64 *diff)
{
	s64 toks;

	if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
		*diff = -toks;
		return HTB_CANT_SEND;
	}

	if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
		return HTB_CAN_SEND;

	*diff = -toks;
	return HTB_MAY_BORROW;
}
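
/* Worked example (illustrative only, assuming htb_hysteresis == 0 so both
 * watermarks are 0): with cl->ctokens == -5000 and *diff == 2000, the ceil
 * bucket holds toks == -3000 < 0, so the class is HTB_CANT_SEND and *diff
 * is rewritten to 3000, the delay (tokens are kept in nanoseconds) later
 * passed to htb_add_to_wait_tree() until the mode can change again.
 */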

/**
 * htb_change_class_mode - changes class's mode
 *
 * This should be the only way to change a class's mode under normal
 * circumstances. Routine will update feed lists linkage, change mode
 * and add class to the wait event queue if appropriate. New mode should
 * be different from old one and cl->pq_key has to be valid if changing
 * to mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
 */
static void
htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
{
	enum htb_cmode new_mode = htb_class_mode(cl, diff);

	if (new_mode == cl->cmode)
		return;

	if (new_mode == HTB_CANT_SEND) {
		cl->overlimits++;
		q->overlimits++;
	}

	if (cl->prio_activity) {	/* not necessary: speed optimization */
		if (cl->cmode != HTB_CANT_SEND)
			htb_deactivate_prios(q, cl);
		cl->cmode = new_mode;
		if (new_mode != HTB_CANT_SEND)
			htb_activate_prios(q, cl);
	} else
		cl->cmode = new_mode;
}

/**
 * htb_activate - inserts leaf cl into appropriate active feeds
 *
 * Routine learns (new) priority of leaf and activates feed chain
 * for the prio. It can be called on already active leaf safely.
 */
static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(cl->level || !cl->leaf.q || !cl->leaf.q->q.qlen);

	if (!cl->prio_activity) {
		cl->prio_activity = 1 << cl->prio;
		htb_activate_prios(q, cl);
	}
}

/**
 * htb_deactivate - remove leaf cl from active feeds
 *
 * Make sure that leaf is active. In other words it can't be called
 * with a non-active leaf.
 */
static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
{
	WARN_ON(!cl->prio_activity);

	htb_deactivate_prios(q, cl);
	cl->prio_activity = 0;
}

static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	int ret;
	unsigned int len = qdisc_pkt_len(skb);
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl = htb_classify(skb, sch, &ret);

	if (cl == HTB_DIRECT) {
		/* enqueue to helper queue */
		if (q->direct_queue.qlen < q->direct_qlen) {
			__qdisc_enqueue_tail(skb, &q->direct_queue);
			q->direct_pkts++;
		} else {
			return qdisc_drop(skb, sch, to_free);
		}
#ifdef CONFIG_NET_CLS_ACT
	} else if (!cl) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
#endif
	} else if ((ret = qdisc_enqueue(skb, cl->leaf.q,
					to_free)) != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(ret)) {
			qdisc_qstats_drop(sch);
			cl->drops++;
		}
		return ret;
	} else {
		htb_activate(q, cl);
	}

	sch->qstats.backlog += len;
	sch->q.qlen++;
	return NET_XMIT_SUCCESS;
}

static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->tokens;

	if (toks > cl->buffer)
		toks = cl->buffer;
	toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->tokens = toks;
}

static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
{
	s64 toks = diff + cl->ctokens;

	if (toks > cl->cbuffer)
		toks = cl->cbuffer;
	toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
	if (toks <= -cl->mbuffer)
		toks = 1 - cl->mbuffer;

	cl->ctokens = toks;
}
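
/* Illustrative note (not from the original source): tokens are measured in
 * nanoseconds of transmission time, so for a hypothetical 1 Mbit/s rate,
 * psched_l2t_ns(&cl->rate, 1500) is roughly 12,000,000 ns; dequeuing a
 * 1500-byte packet debits that much, capped above by cl->buffer and
 * clamped below near -cl->mbuffer.
 */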

/**
 * htb_charge_class - charges amount "bytes" to leaf and ancestors
 *
 * Routine assumes that packet "bytes" long was dequeued from leaf cl
 * borrowing from "level". It accounts bytes to ceil leaky bucket for
 * leaf and all ancestors and to rate bucket for ancestors at levels
 * "level" and higher. It also handles possible change of mode resulting
 * from the update. Note that mode can also increase here (MAY_BORROW to
 * CAN_SEND) because we can use a more precise clock than the event queue
 * here. In such a case we remove the class from the event queue first.
 */
static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
			     int level, struct sk_buff *skb)
{
	int bytes = qdisc_pkt_len(skb);
	enum htb_cmode old_mode;
	s64 diff;

	while (cl) {
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		if (cl->level >= level) {
			if (cl->level == level)
				cl->xstats.lends++;
			htb_accnt_tokens(cl, bytes, diff);
		} else {
			cl->xstats.borrows++;
			cl->tokens += diff;	/* we moved t_c; update tokens */
		}
		htb_accnt_ctokens(cl, bytes, diff);
		cl->t_c = q->now;

		old_mode = cl->cmode;
		diff = 0;
		htb_change_class_mode(q, cl, &diff);
		if (old_mode != cl->cmode) {
			if (old_mode != HTB_CAN_SEND)
				htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
			if (cl->cmode != HTB_CAN_SEND)
				htb_add_to_wait_tree(q, cl, diff);
		}

		/* update basic stats except for leaves which are already updated */
		if (cl->level)
			bstats_update(&cl->bstats, skb);

		cl = cl->parent;
	}
}

/**
 * htb_do_events - make mode changes to classes at the level
 *
 * Scans event queue for pending events and applies them. Returns time of
 * next pending event (0 for no event in pq, q->now for too many events).
 * Note: only events with cl->pq_key <= q->now are applied.
 */
static s64 htb_do_events(struct htb_sched *q, const int level,
			 unsigned long start)
{
	/* don't run for longer than 2 jiffies; 2 is used instead of
	 * 1 to simplify things when jiffy is going to be incremented
	 * too soon
	 */
	unsigned long stop_at = start + 2;
	struct rb_root *wait_pq = &q->hlevel[level].wait_pq;

	while (time_before(jiffies, stop_at)) {
		struct htb_class *cl;
		s64 diff;
		struct rb_node *p = rb_first(wait_pq);

		if (!p)
			return 0;

		cl = rb_entry(p, struct htb_class, pq_node);
		if (cl->pq_key > q->now)
			return cl->pq_key;

		htb_safe_rb_erase(p, wait_pq);
		diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
		htb_change_class_mode(q, cl, &diff);
		if (cl->cmode != HTB_CAN_SEND)
			htb_add_to_wait_tree(q, cl, diff);
	}

	/* too much load - let's continue after a break for scheduling */
	if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
		pr_warn("htb: too many events!\n");
		q->warned |= HTB_WARN_TOOMANYEVENTS;
	}

	return q->now;
}

/* Returns class->node+prio from id-tree where class's id is >= id. NULL
 * if no such one exists.
 */
static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
					      u32 id)
{
	struct rb_node *r = NULL;

	while (n) {
		struct htb_class *cl =
			rb_entry(n, struct htb_class, node[prio]);

		if (id > cl->common.classid) {
			n = n->rb_right;
		} else if (id < cl->common.classid) {
			r = n;
			n = n->rb_left;
		} else {
			return n;
		}
	}
	return r;
}

/**
 * htb_lookup_leaf - returns next leaf class in DRR order
 *
 * Find leaf where the current feed pointer points to.
 */
static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
{
	int i;
	struct {
		struct rb_node *root;
		struct rb_node **pptr;
		u32 *pid;
	} stk[TC_HTB_MAXDEPTH], *sp = stk;

	BUG_ON(!hprio->row.rb_node);
	sp->root = hprio->row.rb_node;
	sp->pptr = &hprio->ptr;
	sp->pid = &hprio->last_ptr_id;

	for (i = 0; i < 65535; i++) {
		if (!*sp->pptr && *sp->pid) {
			/* ptr was invalidated but id is valid - try to recover
			 * the original or next ptr
			 */
			*sp->pptr =
			    htb_id_find_next_upper(prio, sp->root, *sp->pid);
		}
		*sp->pid = 0;	/* ptr is valid now, so remove this hint as it
				 * can become out of date quickly
				 */
		if (!*sp->pptr) {	/* we are at right end; rewind & go up */
			*sp->pptr = sp->root;
			while ((*sp->pptr)->rb_left)
				*sp->pptr = (*sp->pptr)->rb_left;
			if (sp > stk) {
				sp--;
				if (!*sp->pptr) {
					WARN_ON(1);
					return NULL;
				}
				htb_next_rb_node(sp->pptr);
			}
		} else {
			struct htb_class *cl;
			struct htb_prio *clp;

			cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
			if (!cl->level)
				return cl;
			clp = &cl->inner.clprio[prio];
			(++sp)->root = clp->feed.rb_node;
			sp->pptr = &clp->ptr;
			sp->pid = &clp->last_ptr_id;
		}
	}
	WARN_ON(1);
	return NULL;
}

/* dequeues packet at given priority and level; call only if
 * you are sure that there is an active class at prio/level
 */
static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
					const int level)
{
	struct sk_buff *skb = NULL;
	struct htb_class *cl, *start;
	struct htb_level *hlevel = &q->hlevel[level];
	struct htb_prio *hprio = &hlevel->hprio[prio];

	/* look initial class up in the row */
	start = cl = htb_lookup_leaf(hprio, prio);

	do {
next:
		if (unlikely(!cl))
			return NULL;

		/* class can be empty - it is unlikely but can be true if leaf
		 * qdisc drops packets in enqueue routine or if someone used
		 * graft operation on the leaf since last dequeue;
		 * simply deactivate and skip such class
		 */
		if (unlikely(cl->leaf.q->q.qlen == 0)) {
			struct htb_class *next;
			htb_deactivate(q, cl);

			/* row/level might become empty */
			if ((q->row_mask[level] & (1 << prio)) == 0)
				return NULL;

			next = htb_lookup_leaf(hprio, prio);

			if (cl == start)	/* fix start if we just deleted it */
				start = next;
			cl = next;
			goto next;
		}

		skb = cl->leaf.q->dequeue(cl->leaf.q);
		if (likely(skb != NULL))
			break;

		qdisc_warn_nonwc("htb", cl->leaf.q);
		htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
					 &q->hlevel[0].hprio[prio].ptr);
		cl = htb_lookup_leaf(hprio, prio);

	} while (cl != start);

	if (likely(skb != NULL)) {
		bstats_update(&cl->bstats, skb);
		cl->leaf.deficit[level] -= qdisc_pkt_len(skb);
		if (cl->leaf.deficit[level] < 0) {
			cl->leaf.deficit[level] += cl->quantum;
			htb_next_rb_node(level ? &cl->parent->inner.clprio[prio].ptr :
						 &q->hlevel[0].hprio[prio].ptr);
		}
		/* this used to be after charge_class but this constellation
		 * gives us slightly better performance
		 */
		if (!cl->leaf.q->q.qlen)
			htb_deactivate(q, cl);
		htb_charge_class(q, cl, level, skb);
	}
	return skb;
}

static struct sk_buff *htb_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct htb_sched *q = qdisc_priv(sch);
	int level;
	s64 next_event;
	unsigned long start_at;

	/* try to dequeue direct packets as high prio (!) to minimize cpu work */
	skb = __qdisc_dequeue_head(&q->direct_queue);
	if (skb != NULL) {
ok:
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
		return skb;
	}

	if (!sch->q.qlen)
		goto fin;
	q->now = ktime_get_ns();
	start_at = jiffies;

	next_event = q->now + 5LLU * NSEC_PER_SEC;

	for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
		/* common case optimization - skip event handler quickly */
		int m;
		s64 event = q->near_ev_cache[level];

		if (q->now >= event) {
			event = htb_do_events(q, level, start_at);
			if (!event)
				event = q->now + NSEC_PER_SEC;
			q->near_ev_cache[level] = event;
		}

		if (next_event > event)
			next_event = event;

		m = ~q->row_mask[level];
		while (m != (int)(-1)) {
			int prio = ffz(m);

			m |= 1 << prio;
			skb = htb_dequeue_tree(q, prio, level);
			if (likely(skb != NULL))
				goto ok;
		}
	}
	if (likely(next_event > q->now))
		qdisc_watchdog_schedule_ns(&q->watchdog, next_event);
	else
		schedule_work(&q->work);
fin:
	return skb;
}

/* reset all classes */
/* always called under BH & queue lock */
static void htb_reset(struct Qdisc *sch)
{
	struct htb_sched *q = qdisc_priv(sch);
	struct htb_class *cl;
	unsigned int i;

	for (i = 0; i < q->clhash.hashsize; i++) {
		hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
			if (cl->level)
				memset(&cl->inner, 0, sizeof(cl->inner));
			else {
				if (cl->leaf.q && !q->offload)
					qdisc_reset(cl->leaf.q);
			}
			cl->prio_activity = 0;
			cl->cmode = HTB_CAN_SEND;
		}
	}
	qdisc_watchdog_cancel(&q->watchdog);
	__qdisc_reset_queue(&q->direct_queue);
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	memset(q->hlevel, 0, sizeof(q->hlevel));
	memset(q->row_mask, 0, sizeof(q->row_mask));
}

static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
	[TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
	[TCA_HTB_INIT] = { .len = sizeof(struct tc_htb_glob) },
	[TCA_HTB_CTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_RTAB] = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
	[TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
	[TCA_HTB_RATE64] = { .type = NLA_U64 },
	[TCA_HTB_CEIL64] = { .type = NLA_U64 },
	[TCA_HTB_OFFLOAD] = { .type = NLA_FLAG },
};

static void htb_work_func(struct work_struct *work)
{
	struct htb_sched *q = container_of(work, struct htb_sched, work);
	struct Qdisc *sch = q->watchdog.qdisc;

	rcu_read_lock();
	__netif_schedule(qdisc_root(sch));
	rcu_read_unlock();
}

static void htb_set_lockdep_class_child(struct Qdisc *q)
{
	static struct lock_class_key child_key;

	lockdep_set_class(qdisc_lock(q), &child_key);
}

static int htb_offload(struct net_device *dev, struct tc_htb_qopt_offload *opt)
{
	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_HTB, opt);
}
Alexander Aringe63d7df2017-12-20 12:35:13 -05001014static int htb_init(struct Qdisc *sch, struct nlattr *opt,
1015 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001016{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001017 struct net_device *dev = qdisc_dev(sch);
1018 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001020 struct nlattr *tb[TCA_HTB_MAX + 1];
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021 struct tc_htb_glob *gopt;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001022 unsigned int ntx;
Patrick McHardycee63722008-01-23 20:33:32 -08001023 int err;
Patrick McHardycee63722008-01-23 20:33:32 -08001024
Nikolay Aleksandrov88c2ace2017-08-30 12:48:57 +03001025 qdisc_watchdog_init(&q->watchdog, sch);
1026 INIT_WORK(&q->work, htb_work_func);
1027
Patrick McHardycee63722008-01-23 20:33:32 -08001028 if (!opt)
1029 return -EINVAL;
1030
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001031 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001032 if (err)
1033 return err;
1034
Johannes Berg8cb08172019-04-26 14:07:28 +02001035 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1036 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001037 if (err < 0)
1038 return err;
1039
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001040 if (!tb[TCA_HTB_INIT])
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 return -EINVAL;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001042
Patrick McHardy1e904742008-01-22 22:11:17 -08001043 gopt = nla_data(tb[TCA_HTB_INIT]);
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001044 if (gopt->version != HTB_VER >> 16)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001045 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001046
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001047 q->offload = nla_get_flag(tb[TCA_HTB_OFFLOAD]);
1048
1049 if (q->offload) {
1050 if (sch->parent != TC_H_ROOT)
1051 return -EOPNOTSUPP;
1052
1053 if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
1054 return -EOPNOTSUPP;
1055
1056 q->num_direct_qdiscs = dev->real_num_tx_queues;
1057 q->direct_qdiscs = kcalloc(q->num_direct_qdiscs,
1058 sizeof(*q->direct_qdiscs),
1059 GFP_KERNEL);
1060 if (!q->direct_qdiscs)
1061 return -ENOMEM;
1062 }
1063
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001064 err = qdisc_class_hash_init(&q->clhash);
1065 if (err < 0)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001066 goto err_free_direct_qdiscs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001067
Florian Westphal48da34b2016-09-18 00:57:34 +02001068 qdisc_skb_head_init(&q->direct_queue);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001069
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001070 if (tb[TCA_HTB_DIRECT_QLEN])
1071 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
Phil Sutter348e3432015-08-18 10:30:49 +02001072 else
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001073 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
Phil Sutter348e3432015-08-18 10:30:49 +02001074
Linus Torvalds1da177e2005-04-16 15:20:36 -07001075 if ((q->rate2quantum = gopt->rate2quantum) < 1)
1076 q->rate2quantum = 1;
1077 q->defcls = gopt->defcls;
1078
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001079 if (!q->offload)
1080 return 0;
1081
1082 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1083 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1084 struct Qdisc *qdisc;
1085
1086 qdisc = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1087 TC_H_MAKE(sch->handle, 0), extack);
1088 if (!qdisc) {
1089 err = -ENOMEM;
1090 goto err_free_qdiscs;
1091 }
1092
1093 htb_set_lockdep_class_child(qdisc);
1094 q->direct_qdiscs[ntx] = qdisc;
1095 qdisc->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1096 }
1097
1098 sch->flags |= TCQ_F_MQROOT;
1099
1100 offload_opt = (struct tc_htb_qopt_offload) {
1101 .command = TC_HTB_CREATE,
1102 .parent_classid = TC_H_MAJ(sch->handle) >> 16,
1103 .classid = TC_H_MIN(q->defcls),
1104 .extack = extack,
1105 };
1106 err = htb_offload(dev, &offload_opt);
1107 if (err)
1108 goto err_free_qdiscs;
1109
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110 return 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001111
1112err_free_qdiscs:
1113 /* TC_HTB_CREATE call failed, avoid any further calls to the driver. */
1114 q->offload = false;
1115
1116 for (ntx = 0; ntx < q->num_direct_qdiscs && q->direct_qdiscs[ntx];
1117 ntx++)
1118 qdisc_put(q->direct_qdiscs[ntx]);
1119
1120 qdisc_class_hash_destroy(&q->clhash);
1121 /* Prevent use-after-free and double-free when htb_destroy gets called.
1122 */
1123 q->clhash.hash = NULL;
1124 q->clhash.hashsize = 0;
1125
1126err_free_direct_qdiscs:
1127 kfree(q->direct_qdiscs);
1128 q->direct_qdiscs = NULL;
1129 return err;
1130}
1131
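/* Graft the per-queue direct qdiscs created in htb_init() onto their TX
 * queues; any remaining queues get their old qdisc dropped.  The
 * temporary array is freed once everything is attached.
 */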
1132static void htb_attach_offload(struct Qdisc *sch)
1133{
1134 struct net_device *dev = qdisc_dev(sch);
1135 struct htb_sched *q = qdisc_priv(sch);
1136 unsigned int ntx;
1137
1138 for (ntx = 0; ntx < q->num_direct_qdiscs; ntx++) {
1139 struct Qdisc *old, *qdisc = q->direct_qdiscs[ntx];
1140
1141 old = dev_graft_qdisc(qdisc->dev_queue, qdisc);
1142 qdisc_put(old);
1143 qdisc_hash_add(qdisc, false);
1144 }
1145 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) {
1146 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1147 struct Qdisc *old = dev_graft_qdisc(dev_queue, NULL);
1148
1149 qdisc_put(old);
1150 }
1151
1152 kfree(q->direct_qdiscs);
1153 q->direct_qdiscs = NULL;
1154}
1155
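/* Non-offload mode: attach the HTB qdisc itself to every TX queue,
 * taking one reference per queue.
 */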
1156static void htb_attach_software(struct Qdisc *sch)
1157{
1158 struct net_device *dev = qdisc_dev(sch);
1159 unsigned int ntx;
1160
1161	/* Mimic qdisc_graft() behavior. */
1162 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
1163 struct netdev_queue *dev_queue = netdev_get_tx_queue(dev, ntx);
1164 struct Qdisc *old = dev_graft_qdisc(dev_queue, sch);
1165
1166 qdisc_refcount_inc(sch);
1167
1168 qdisc_put(old);
1169 }
1170}
1171
1172static void htb_attach(struct Qdisc *sch)
1173{
1174 struct htb_sched *q = qdisc_priv(sch);
1175
1176 if (q->offload)
1177 htb_attach_offload(sch);
1178 else
1179 htb_attach_software(sch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180}
1181
1182static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1183{
1184 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001185 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186 struct tc_htb_glob gopt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001188 if (q->offload)
1189 sch->flags |= TCQ_F_OFFLOADED;
1190 else
1191 sch->flags &= ~TCQ_F_OFFLOADED;
1192
Cong Wangb3624872019-05-04 11:43:42 -07001193 sch->qstats.overlimits = q->overlimits;
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001194	/* It's safe not to acquire the qdisc lock: as we hold RTNL,
1195	 * no change can happen to the qdisc parameters.
1196	 */
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001197
1198 gopt.direct_pkts = q->direct_pkts;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199 gopt.version = HTB_VER;
1200 gopt.rate2quantum = q->rate2quantum;
1201 gopt.defcls = q->defcls;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001202 gopt.debug = 0;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001203
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001204 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001205 if (nest == NULL)
1206 goto nla_put_failure;
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001207 if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1208 nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
David S. Miller1b34ec42012-03-29 05:11:39 -04001209 goto nla_put_failure;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001210 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1211 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001212
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001213 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001214
Patrick McHardy1e904742008-01-22 22:11:17 -08001215nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001216 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001217 return -1;
1218}
1219
1220static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
Stephen Hemminger87990462006-08-10 23:35:16 -07001221 struct sk_buff *skb, struct tcmsg *tcm)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222{
Stephen Hemminger87990462006-08-10 23:35:16 -07001223 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001224 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001225 struct nlattr *nest;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 struct tc_htb_opt opt;
1227
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001228	/* It's safe not to acquire the qdisc lock: as we hold RTNL,
1229	 * no change can happen to the class parameters.
1230	 */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001231 tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1232 tcm->tcm_handle = cl->common.classid;
Cong Wang11957be2018-09-07 13:29:14 -07001233 if (!cl->level && cl->leaf.q)
1234 tcm->tcm_info = cl->leaf.q->handle;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001235
Michal Kubecekae0be8d2019-04-26 11:13:06 +02001236 nest = nla_nest_start_noflag(skb, TCA_OPTIONS);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001237 if (nest == NULL)
1238 goto nla_put_failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001239
Stephen Hemminger87990462006-08-10 23:35:16 -07001240 memset(&opt, 0, sizeof(opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001242 psched_ratecfg_getrate(&opt.rate, &cl->rate);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001243 opt.buffer = PSCHED_NS2TICKS(cl->buffer);
Eric Dumazet01cb71d2013-06-02 13:55:05 +00001244 psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
Jiri Pirko9c10f412013-02-12 00:12:00 +00001245 opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001246 opt.quantum = cl->quantum;
1247 opt.prio = cl->prio;
Stephen Hemminger87990462006-08-10 23:35:16 -07001248 opt.level = cl->level;
David S. Miller1b34ec42012-03-29 05:11:39 -04001249 if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1250 goto nla_put_failure;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001251 if (q->offload && nla_put_flag(skb, TCA_HTB_OFFLOAD))
1252 goto nla_put_failure;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001253 if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001254 nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1255 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001256 goto nla_put_failure;
1257 if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
Nicolas Dichtel2a51c1e2016-04-25 10:25:15 +02001258 nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1259 TCA_HTB_PAD))
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001260 goto nla_put_failure;
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001261
Eric Dumazet6f542ef2014-03-05 10:14:34 -08001262 return nla_nest_end(skb, nest);
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001263
Patrick McHardy1e904742008-01-22 22:11:17 -08001264nla_put_failure:
Patrick McHardy4b3550ef2008-01-23 20:34:11 -08001265 nla_nest_cancel(skb, nest);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001266 return -1;
1267}
1268
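/* Inner classes do not see packets in offload mode, so rebuild
 * cl->bstats from scratch: for every class whose ancestor chain reaches
 * cl, add its recorded bias counters, plus the leaf qdisc counters for
 * leaf classes.
 */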
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001269static void htb_offload_aggregate_stats(struct htb_sched *q,
1270 struct htb_class *cl)
1271{
1272 struct htb_class *c;
1273 unsigned int i;
1274
1275 memset(&cl->bstats, 0, sizeof(cl->bstats));
1276
1277 for (i = 0; i < q->clhash.hashsize; i++) {
1278 hlist_for_each_entry(c, &q->clhash.hash[i], common.hnode) {
1279 struct htb_class *p = c;
1280
1281 while (p && p->level < cl->level)
1282 p = p->parent;
1283
1284 if (p != cl)
1285 continue;
1286
1287 cl->bstats.bytes += c->bstats_bias.bytes;
1288 cl->bstats.packets += c->bstats_bias.packets;
1289 if (c->level == 0) {
1290 cl->bstats.bytes += c->leaf.q->bstats.bytes;
1291 cl->bstats.packets += c->leaf.q->bstats.packets;
1292 }
1293 }
1294 }
1295}
1296
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297static int
Stephen Hemminger87990462006-08-10 23:35:16 -07001298htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299{
Stephen Hemminger87990462006-08-10 23:35:16 -07001300 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001301 struct htb_sched *q = qdisc_priv(sch);
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001302 struct gnet_stats_queue qs = {
1303 .drops = cl->drops,
Eric Dumazet3c75f6e2017-09-18 12:36:22 -07001304 .overlimits = cl->overlimits,
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001305 };
John Fastabend64015852014-09-28 11:53:57 -07001306 __u32 qlen = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307
Paolo Abeni5dd431b2019-03-28 16:53:12 +01001308 if (!cl->level && cl->leaf.q)
1309 qdisc_qstats_qlen_backlog(cl->leaf.q, &qlen, &qs.backlog);
1310
Konstantin Khlebnikov0564bf02016-07-16 17:08:56 +03001311 cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1312 INT_MIN, INT_MAX);
1313 cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1314 INT_MIN, INT_MAX);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001316 if (q->offload) {
1317 if (!cl->level) {
1318 if (cl->leaf.q)
1319 cl->bstats = cl->leaf.q->bstats;
1320 else
1321 memset(&cl->bstats, 0, sizeof(cl->bstats));
1322 cl->bstats.bytes += cl->bstats_bias.bytes;
1323 cl->bstats.packets += cl->bstats_bias.packets;
1324 } else {
1325 htb_offload_aggregate_stats(q, cl);
1326 }
1327 }
1328
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001329 if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
1330 d, NULL, &cl->bstats) < 0 ||
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001331 gnet_stats_copy_rate_est(d, &cl->rate_est) < 0 ||
Eric Dumazet338ed9b2016-06-21 23:16:51 -07001332 gnet_stats_copy_queue(d, NULL, &qs, qlen) < 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333 return -1;
1334
1335 return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1336}
1337
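/* Ask the driver which TX queue backs the class that a new child qdisc
 * is being attached to (TC_HTB_LEAF_QUERY_QUEUE), so the graft happens
 * on the right queue.
 */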
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001338static struct netdev_queue *
1339htb_select_queue(struct Qdisc *sch, struct tcmsg *tcm)
1340{
1341 struct net_device *dev = qdisc_dev(sch);
1342 struct tc_htb_qopt_offload offload_opt;
1343 int err;
1344
1345 offload_opt = (struct tc_htb_qopt_offload) {
1346 .command = TC_HTB_LEAF_QUERY_QUEUE,
1347 .classid = TC_H_MIN(tcm->tcm_parent),
1348 };
1349 err = htb_offload(dev, &offload_opt);
1350 if (err || offload_opt.qid >= dev->num_tx_queues)
1351 return NULL;
1352 return netdev_get_tx_queue(dev, offload_opt.qid);
1353}
1354
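/* Swap the qdisc on a TX queue while the device is quiesced, and hand
 * the displaced qdisc back to the caller for release.
 */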
1355static struct Qdisc *
1356htb_graft_helper(struct netdev_queue *dev_queue, struct Qdisc *new_q)
1357{
1358 struct net_device *dev = dev_queue->dev;
1359 struct Qdisc *old_q;
1360
1361 if (dev->flags & IFF_UP)
1362 dev_deactivate(dev);
1363 old_q = dev_graft_qdisc(dev_queue, new_q);
1364 if (new_q)
1365 new_q->flags |= TCQ_F_ONETXQUEUE | TCQ_F_NOPARENT;
1366 if (dev->flags & IFF_UP)
1367 dev_activate(dev);
1368
1369 return old_q;
1370}
1371
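/* When the driver moves a class to a different hardware queue, move the
 * software qdisc along with it by swapping it between the two netdev
 * queues; the vacated queue must be left with a builtin qdisc.
 */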
1372static void htb_offload_move_qdisc(struct Qdisc *sch, u16 qid_old, u16 qid_new)
1373{
1374 struct netdev_queue *queue_old, *queue_new;
1375 struct net_device *dev = qdisc_dev(sch);
1376 struct Qdisc *qdisc;
1377
1378 queue_old = netdev_get_tx_queue(dev, qid_old);
1379 queue_new = netdev_get_tx_queue(dev, qid_new);
1380
1381 if (dev->flags & IFF_UP)
1382 dev_deactivate(dev);
1383 qdisc = dev_graft_qdisc(queue_old, NULL);
1384 qdisc->dev_queue = queue_new;
1385 qdisc = dev_graft_qdisc(queue_new, qdisc);
1386 if (dev->flags & IFF_UP)
1387 dev_activate(dev);
1388
1389 WARN_ON(!(qdisc->flags & TCQ_F_BUILTIN));
1390}
1391
Linus Torvalds1da177e2005-04-16 15:20:36 -07001392static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
Alexander Aring653d6fd2017-12-20 12:35:17 -05001393 struct Qdisc **old, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001394{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001395 struct netdev_queue *dev_queue = sch->dev_queue;
Stephen Hemminger87990462006-08-10 23:35:16 -07001396 struct htb_class *cl = (struct htb_class *)arg;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001397 struct htb_sched *q = qdisc_priv(sch);
1398 struct Qdisc *old_q;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001400 if (cl->level)
1401 return -EINVAL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001402
1403	if (q->offload) {
1404		dev_queue = new ? new->dev_queue : cl->leaf.q->dev_queue;
1405		WARN_ON(new && dev_queue != cl->leaf.q->dev_queue);
1406	}
1407
1408 if (!new) {
1409 new = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
1410 cl->common.classid, extack);
1411 if (!new)
1412 return -ENOBUFS;
1413 }
1414
1415 if (q->offload) {
1416 htb_set_lockdep_class_child(new);
1417 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1418 qdisc_refcount_inc(new);
1419 old_q = htb_graft_helper(dev_queue, new);
1420 }
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001421
Cong Wang11957be2018-09-07 13:29:14 -07001422 *old = qdisc_replace(sch, new, &cl->leaf.q);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001423
1424 if (q->offload) {
1425 WARN_ON(old_q != *old);
1426 qdisc_put(old_q);
1427 }
1428
Patrick McHardy5b9a9cc2009-09-04 06:41:17 +00001429 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430}
1431
Stephen Hemminger87990462006-08-10 23:35:16 -07001432static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433{
Stephen Hemminger87990462006-08-10 23:35:16 -07001434 struct htb_class *cl = (struct htb_class *)arg;
Cong Wang11957be2018-09-07 13:29:14 -07001435 return !cl->level ? cl->leaf.q : NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001436}
1437
Patrick McHardy256d61b2006-11-29 17:37:05 -08001438static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1439{
1440 struct htb_class *cl = (struct htb_class *)arg;
1441
Konstantin Khlebnikov95946652017-08-15 16:39:59 +03001442 htb_deactivate(qdisc_priv(sch), cl);
Patrick McHardy256d61b2006-11-29 17:37:05 -08001443}
1444
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001445static inline int htb_parent_last_child(struct htb_class *cl)
1446{
1447 if (!cl->parent)
1448 /* the root class */
1449 return 0;
Patrick McHardy42077592008-07-05 23:22:53 -07001450 if (cl->parent->children > 1)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001451 /* not the last child */
1452 return 0;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001453 return 1;
1454}
1455
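/* Turn an inner node whose last child is going away back into a leaf:
 * drop it from the wait queue if throttled, reset its level and tokens,
 * and attach new_q (or noop_qdisc) as its leaf qdisc.
 */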
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001456static void htb_parent_to_leaf(struct Qdisc *sch, struct htb_class *cl,
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001457 struct Qdisc *new_q)
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001458{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001459 struct htb_sched *q = qdisc_priv(sch);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001460 struct htb_class *parent = cl->parent;
1461
Cong Wang11957be2018-09-07 13:29:14 -07001462 WARN_ON(cl->level || !cl->leaf.q || cl->prio_activity);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001463
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001464 if (parent->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001465 htb_safe_rb_erase(&parent->pq_node,
1466 &q->hlevel[parent->level].wait_pq);
Jarek Poplawski3ba08b02008-05-03 20:46:29 -07001467
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001468 parent->level = 0;
Cong Wang11957be2018-09-07 13:29:14 -07001469 memset(&parent->inner, 0, sizeof(parent->inner));
1470 parent->leaf.q = new_q ? new_q : &noop_qdisc;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001471 parent->tokens = parent->buffer;
1472 parent->ctokens = parent->cbuffer;
Eric Dumazetd2de8752014-08-22 18:32:09 -07001473 parent->t_c = ktime_get_ns();
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001474 parent->cmode = HTB_CAN_SEND;
1475}
1476
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001477static void htb_parent_to_leaf_offload(struct Qdisc *sch,
1478 struct netdev_queue *dev_queue,
1479 struct Qdisc *new_q)
1480{
1481 struct Qdisc *old_q;
1482
1483 /* One ref for cl->leaf.q, the other for dev_queue->qdisc. */
1484 qdisc_refcount_inc(new_q);
1485 old_q = htb_graft_helper(dev_queue, new_q);
1486 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1487}
1488
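/* Tell the driver to delete a leaf node.  If this was the parent's last
 * child, the driver converts the parent back to a leaf
 * (TC_HTB_LEAF_DEL_LAST, forced on full destroy); otherwise the driver
 * may report that another class was moved to a new queue, in which case
 * the software qdisc has to follow it.
 */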
1489static int htb_destroy_class_offload(struct Qdisc *sch, struct htb_class *cl,
1490 bool last_child, bool destroying,
1491 struct netlink_ext_ack *extack)
1492{
1493 struct tc_htb_qopt_offload offload_opt;
1494 struct Qdisc *q = cl->leaf.q;
1495 struct Qdisc *old = NULL;
1496 int err;
1497
1498 if (cl->level)
1499 return -EINVAL;
1500
1501 WARN_ON(!q);
1502 if (!destroying) {
1503		/* Skipped on a full destroy of HTB, where two cases are possible:
1504		 * 1. q is a normal qdisc, but q->dev_queue has a noop qdisc.
1505		 * 2. q is a noop qdisc (for nodes that were inner), and
1506		 *    q->dev_queue is noop_netdev_queue.
1507		 */
1508 old = htb_graft_helper(q->dev_queue, NULL);
1509 WARN_ON(!old);
1510 WARN_ON(old != q);
1511 }
1512
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001513 if (cl->parent) {
1514 cl->parent->bstats_bias.bytes += q->bstats.bytes;
1515 cl->parent->bstats_bias.packets += q->bstats.packets;
1516 }
1517
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001518 offload_opt = (struct tc_htb_qopt_offload) {
1519 .command = !last_child ? TC_HTB_LEAF_DEL :
1520 destroying ? TC_HTB_LEAF_DEL_LAST_FORCE :
1521 TC_HTB_LEAF_DEL_LAST,
1522 .classid = cl->common.classid,
1523 .extack = extack,
1524 };
1525 err = htb_offload(qdisc_dev(sch), &offload_opt);
1526
1527 if (!err || destroying)
1528 qdisc_put(old);
1529 else
1530 htb_graft_helper(q->dev_queue, old);
1531
1532 if (last_child)
1533 return err;
1534
1535 if (!err && offload_opt.moved_qid != 0) {
1536 if (destroying)
1537 q->dev_queue = netdev_get_tx_queue(qdisc_dev(sch),
1538 offload_opt.qid);
1539 else
1540 htb_offload_move_qdisc(sch, offload_opt.moved_qid,
1541 offload_opt.qid);
1542 }
1543
1544 return err;
1545}
1546
Stephen Hemminger87990462006-08-10 23:35:16 -07001547static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001549 if (!cl->level) {
Cong Wang11957be2018-09-07 13:29:14 -07001550 WARN_ON(!cl->leaf.q);
Vlad Buslov86bd4462018-09-24 19:22:50 +03001551 qdisc_put(cl->leaf.q);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001552 }
Eric Dumazet1c0d32f2016-12-04 09:48:16 -08001553 gen_kill_estimator(&cl->rate_est);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001554 tcf_block_put(cl->block);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001555 kfree(cl);
1556}
1557
Stephen Hemminger87990462006-08-10 23:35:16 -07001558static void htb_destroy(struct Qdisc *sch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559{
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001560 struct net_device *dev = qdisc_dev(sch);
1561 struct tc_htb_qopt_offload offload_opt;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 struct htb_sched *q = qdisc_priv(sch);
Sasha Levinb67bfe02013-02-27 17:06:00 -08001563 struct hlist_node *next;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001564 bool nonempty, changed;
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001565 struct htb_class *cl;
1566 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567
Jarek Poplawski12247362009-02-01 01:13:22 -08001568 cancel_work_sync(&q->work);
Patrick McHardyfb983d42007-03-16 01:22:39 -07001569 qdisc_watchdog_cancel(&q->watchdog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570 /* This line used to be after htb_destroy_class call below
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001571 * and surprisingly it worked in 2.4. But it must precede it
1572	 * because a filter needs its target class alive to be able to call
1573	 * unbind_filter on it (without an Oops).
1574 */
Jiri Pirko6529eab2017-05-17 11:07:55 +02001575 tcf_block_put(q->block);
Stephen Hemminger87990462006-08-10 23:35:16 -07001576
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001577 for (i = 0; i < q->clhash.hashsize; i++) {
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001578 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Jiri Pirko6529eab2017-05-17 11:07:55 +02001579 tcf_block_put(cl->block);
Konstantin Khlebnikov89890422017-08-15 16:35:21 +03001580 cl->block = NULL;
1581 }
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001582 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001583
1584 do {
1585 nonempty = false;
1586 changed = false;
1587 for (i = 0; i < q->clhash.hashsize; i++) {
1588 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1589 common.hnode) {
1590 bool last_child;
1591
1592 if (!q->offload) {
1593 htb_destroy_class(sch, cl);
1594 continue;
1595 }
1596
1597 nonempty = true;
1598
1599 if (cl->level)
1600 continue;
1601
1602 changed = true;
1603
1604 last_child = htb_parent_last_child(cl);
1605 htb_destroy_class_offload(sch, cl, last_child,
1606 true, NULL);
1607 qdisc_class_hash_remove(&q->clhash,
1608 &cl->common);
1609 if (cl->parent)
1610 cl->parent->children--;
1611 if (last_child)
1612 htb_parent_to_leaf(sch, cl, NULL);
1613 htb_destroy_class(sch, cl);
1614 }
1615 }
1616 } while (changed);
1617 WARN_ON(nonempty);
1618
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001619 qdisc_class_hash_destroy(&q->clhash);
Eric Dumazeta5a9f5342016-06-13 20:21:56 -07001620 __qdisc_reset_queue(&q->direct_queue);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001621
1622 if (!q->offload)
1623 return;
1624
1625 offload_opt = (struct tc_htb_qopt_offload) {
1626 .command = TC_HTB_DESTROY,
1627 };
1628 htb_offload(dev, &offload_opt);
1629
1630 if (!q->direct_qdiscs)
1631 return;
1632 for (i = 0; i < q->num_direct_qdiscs && q->direct_qdiscs[i]; i++)
1633 qdisc_put(q->direct_qdiscs[i]);
1634 kfree(q->direct_qdiscs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635}
1636
Maxim Mikityanskiy4dd78a72021-01-19 14:08:12 +02001637static int htb_delete(struct Qdisc *sch, unsigned long arg,
1638 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001639{
1640 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001641 struct htb_class *cl = (struct htb_class *)arg;
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001642 struct Qdisc *new_q = NULL;
1643 int last_child = 0;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001644 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001645
Yang Yinglianga071d272013-12-23 17:38:59 +08001646	/* TODO: why not allow deleting a subtree? References? Does the
1647	 * tc subsystem guarantee that no class refs are held in
1648	 * htb_destroy, so that we can remove children safely there?
1649	 */
Patrick McHardy42077592008-07-05 23:22:53 -07001650 if (cl->children || cl->filter_cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001651 return -EBUSY;
Stephen Hemminger87990462006-08-10 23:35:16 -07001652
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001653 if (!cl->level && htb_parent_last_child(cl))
1654 last_child = 1;
1655
1656 if (q->offload) {
1657 err = htb_destroy_class_offload(sch, cl, last_child, false,
1658 extack);
1659 if (err)
1660 return err;
1661 }
1662
1663 if (last_child) {
1664 struct netdev_queue *dev_queue;
1665
1666 dev_queue = q->offload ? cl->leaf.q->dev_queue : sch->dev_queue;
1667 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001668 cl->parent->common.classid,
1669 NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001670 if (q->offload) {
1671 if (new_q)
1672 htb_set_lockdep_class_child(new_q);
1673 htb_parent_to_leaf_offload(sch, dev_queue, new_q);
1674 }
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001675 }
1676
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 sch_tree_lock(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001678
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001679 if (!cl->level)
1680 qdisc_purge_queue(cl->leaf.q);
Patrick McHardy814a175e2006-11-29 17:34:50 -08001681
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001682 /* delete from hash and active; remainder in destroy_class */
1683 qdisc_class_hash_remove(&q->clhash, &cl->common);
Jarek Poplawski26b284d2008-08-13 15:16:43 -07001684 if (cl->parent)
1685 cl->parent->children--;
Patrick McHardyc38c83c2007-03-27 14:04:24 -07001686
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687 if (cl->prio_activity)
Stephen Hemminger87990462006-08-10 23:35:16 -07001688 htb_deactivate(q, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001690 if (cl->cmode != HTB_CAN_SEND)
Eric Dumazetc9364632013-06-15 03:30:10 -07001691 htb_safe_rb_erase(&cl->pq_node,
1692 &q->hlevel[cl->level].wait_pq);
Patrick McHardyfbd8f132008-07-05 23:22:19 -07001693
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001694 if (last_child)
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001695 htb_parent_to_leaf(sch, cl, new_q);
Jarek Poplawski160d5e12006-12-08 00:26:56 -08001696
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697 sch_tree_unlock(sch);
WANG Cong143976c2017-08-24 16:51:29 -07001698
1699 htb_destroy_class(sch, cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001700 return 0;
1701}
1702
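/* Create a new class or change the parameters of an existing one.
 * Illustrative usage, with example names and rates:
 *
 *	tc class add dev eth0 parent 1: classid 1:10 htb rate 100mbit ceil 200mbit
 */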
Stephen Hemminger87990462006-08-10 23:35:16 -07001703static int htb_change_class(struct Qdisc *sch, u32 classid,
Patrick McHardy1e904742008-01-22 22:11:17 -08001704 u32 parentid, struct nlattr **tca,
Alexander Aring793d81d2017-12-20 12:35:15 -05001705 unsigned long *arg, struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706{
1707 int err = -EINVAL;
1708 struct htb_sched *q = qdisc_priv(sch);
Stephen Hemminger87990462006-08-10 23:35:16 -07001709 struct htb_class *cl = (struct htb_class *)*arg, *parent;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001710 struct tc_htb_qopt_offload offload_opt;
Patrick McHardy1e904742008-01-22 22:11:17 -08001711 struct nlattr *opt = tca[TCA_OPTIONS];
Eric Dumazet6906f4e2013-03-06 06:49:21 +00001712 struct nlattr *tb[TCA_HTB_MAX + 1];
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001713 struct Qdisc *parent_qdisc = NULL;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001714 struct netdev_queue *dev_queue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 struct tc_htb_opt *hopt;
Eric Dumazetdf62cdf2013-09-19 09:10:20 -07001716 u64 rate64, ceil64;
Li RongQingda01ec42018-03-30 10:11:21 +08001717 int warn = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 /* extract all subattrs from opt attr */
Patrick McHardycee63722008-01-23 20:33:32 -08001720 if (!opt)
1721 goto failure;
1722
Johannes Berg8cb08172019-04-26 14:07:28 +02001723 err = nla_parse_nested_deprecated(tb, TCA_HTB_MAX, opt, htb_policy,
1724 NULL);
Patrick McHardycee63722008-01-23 20:33:32 -08001725 if (err < 0)
1726 goto failure;
1727
1728 err = -EINVAL;
Patrick McHardy27a34212008-01-23 20:35:39 -08001729 if (tb[TCA_HTB_PARMS] == NULL)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 goto failure;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001731
Stephen Hemminger87990462006-08-10 23:35:16 -07001732 parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07001733
Patrick McHardy1e904742008-01-22 22:11:17 -08001734 hopt = nla_data(tb[TCA_HTB_PARMS]);
Eric Dumazet196d97f2012-11-05 16:40:49 +00001735 if (!hopt->rate.rate || !hopt->ceil.rate)
Stephen Hemminger87990462006-08-10 23:35:16 -07001736 goto failure;
1737
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001738 /* Keeping backward compatible with rate_table based iproute2 tc */
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001739 if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001740 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB],
1741 NULL));
Yang Yingliang6b1dd852013-12-11 15:48:37 +08001742
1743 if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
Alexander Aringe9bc3fa2017-12-20 12:35:18 -05001744 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB],
1745 NULL));
Jesper Dangaard Brouer8a8e3d82013-08-14 23:47:11 +02001746
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001747 rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1748 ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1749
Stephen Hemminger87990462006-08-10 23:35:16 -07001750 if (!cl) { /* new class */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001751 struct net_device *dev = qdisc_dev(sch);
1752 struct Qdisc *new_q, *old_q;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001753 int prio;
Patrick McHardyee39e102007-07-02 22:48:13 -07001754 struct {
Patrick McHardy1e904742008-01-22 22:11:17 -08001755 struct nlattr nla;
Patrick McHardyee39e102007-07-02 22:48:13 -07001756 struct gnet_estimator opt;
1757 } est = {
Patrick McHardy1e904742008-01-22 22:11:17 -08001758 .nla = {
1759 .nla_len = nla_attr_size(sizeof(est.opt)),
1760 .nla_type = TCA_RATE,
Patrick McHardyee39e102007-07-02 22:48:13 -07001761 },
1762 .opt = {
1763 /* 4s interval, 16s averaging constant */
1764 .interval = 2,
1765 .ewma_log = 2,
1766 },
1767 };
Stephen Hemminger3696f622006-08-10 23:36:01 -07001768
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769 /* check for valid classid */
Joe Perchesf64f9e72009-11-29 16:55:45 -08001770 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1771 htb_find(classid, sch))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 goto failure;
1773
1774 /* check maximal depth */
1775 if (parent && parent->parent && parent->parent->level < 2) {
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001776 pr_err("htb: tree is too deep\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001777 goto failure;
1778 }
1779 err = -ENOBUFS;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001780 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1781 if (!cl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001782 goto failure;
Stephen Hemminger87990462006-08-10 23:35:16 -07001783
Alexander Aring8d1a77f2017-12-20 12:35:19 -05001784 err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
Jiri Pirko6529eab2017-05-17 11:07:55 +02001785 if (err) {
1786 kfree(cl);
1787 goto failure;
1788 }
Eric Dumazet64153ce2013-06-06 14:53:16 -07001789 if (htb_rate_est || tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001790 err = gen_new_estimator(&cl->bstats, NULL,
1791 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001792 NULL,
1793 qdisc_root_sleeping_running(sch),
Eric Dumazet64153ce2013-06-06 14:53:16 -07001794 tca[TCA_RATE] ? : &est.nla);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001795 if (err)
1796 goto err_block_put;
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001797 }
1798
Patrick McHardy42077592008-07-05 23:22:53 -07001799 cl->children = 0;
Stephen Hemminger3696f622006-08-10 23:36:01 -07001800 RB_CLEAR_NODE(&cl->pq_node);
1801
1802 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1803 RB_CLEAR_NODE(&cl->node[prio]);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001804
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001805 cl->common.classid = classid;
1806
1807 /* Make sure nothing interrupts us in between of two
1808 * ndo_setup_tc calls.
1809 */
1810 ASSERT_RTNL();
1811
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL)
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001813	 * so it can't be used inside of sch_tree_lock()
1814 * -- thanks to Karlis Peisenieks
1815 */
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001816 if (!q->offload) {
1817 dev_queue = sch->dev_queue;
1818 } else if (!(parent && !parent->level)) {
1819 /* Assign a dev_queue to this classid. */
1820 offload_opt = (struct tc_htb_qopt_offload) {
1821 .command = TC_HTB_LEAF_ALLOC_QUEUE,
1822 .classid = cl->common.classid,
1823 .parent_classid = parent ?
1824 TC_H_MIN(parent->common.classid) :
1825 TC_HTB_CLASSID_ROOT,
1826 .rate = max_t(u64, hopt->rate.rate, rate64),
1827 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1828 .extack = extack,
1829 };
1830 err = htb_offload(dev, &offload_opt);
1831 if (err) {
1832 pr_err("htb: TC_HTB_LEAF_ALLOC_QUEUE failed with err = %d\n",
1833 err);
1834 goto err_kill_estimator;
1835 }
1836 dev_queue = netdev_get_tx_queue(dev, offload_opt.qid);
1837 } else { /* First child. */
1838 dev_queue = parent->leaf.q->dev_queue;
1839 old_q = htb_graft_helper(dev_queue, NULL);
1840 WARN_ON(old_q != parent->leaf.q);
1841 offload_opt = (struct tc_htb_qopt_offload) {
1842 .command = TC_HTB_LEAF_TO_INNER,
1843 .classid = cl->common.classid,
1844 .parent_classid =
1845 TC_H_MIN(parent->common.classid),
1846 .rate = max_t(u64, hopt->rate.rate, rate64),
1847 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1848 .extack = extack,
1849 };
1850 err = htb_offload(dev, &offload_opt);
1851 if (err) {
1852 pr_err("htb: TC_HTB_LEAF_TO_INNER failed with err = %d\n",
1853 err);
1854 htb_graft_helper(dev_queue, old_q);
1855 goto err_kill_estimator;
1856 }
Maxim Mikityanskiy83271582021-01-19 14:08:14 +02001857 parent->bstats_bias.bytes += old_q->bstats.bytes;
1858 parent->bstats_bias.packets += old_q->bstats.packets;
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001859 qdisc_put(old_q);
1860 }
1861 new_q = qdisc_create_dflt(dev_queue, &pfifo_qdisc_ops,
Alexander Aringa38a98822017-12-20 12:35:21 -05001862 classid, NULL);
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001863 if (q->offload) {
1864 if (new_q) {
1865 htb_set_lockdep_class_child(new_q);
1866 /* One ref for cl->leaf.q, the other for
1867 * dev_queue->qdisc.
1868 */
1869 qdisc_refcount_inc(new_q);
1870 }
1871 old_q = htb_graft_helper(dev_queue, new_q);
1872 /* No qdisc_put needed. */
1873 WARN_ON(!(old_q->flags & TCQ_F_BUILTIN));
1874 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875 sch_tree_lock(sch);
1876 if (parent && !parent->level) {
1877 /* turn parent into inner node */
Paolo Abenie5f0e8f2019-03-28 16:53:13 +01001878 qdisc_purge_queue(parent->leaf.q);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001879 parent_qdisc = parent->leaf.q;
Stephen Hemminger87990462006-08-10 23:35:16 -07001880 if (parent->prio_activity)
1881 htb_deactivate(q, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882
1883 /* remove from evt list because of level change */
1884 if (parent->cmode != HTB_CAN_SEND) {
Eric Dumazetc9364632013-06-15 03:30:10 -07001885 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001886 parent->cmode = HTB_CAN_SEND;
1887 }
1888 parent->level = (parent->parent ? parent->parent->level
Stephen Hemminger87990462006-08-10 23:35:16 -07001889 : TC_HTB_MAXDEPTH) - 1;
Cong Wang11957be2018-09-07 13:29:14 -07001890 memset(&parent->inner, 0, sizeof(parent->inner));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001891 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001892
Linus Torvalds1da177e2005-04-16 15:20:36 -07001893 /* leaf (we) needs elementary qdisc */
Cong Wang11957be2018-09-07 13:29:14 -07001894 cl->leaf.q = new_q ? new_q : &noop_qdisc;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001895
Stephen Hemminger87990462006-08-10 23:35:16 -07001896 cl->parent = parent;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
1898 /* set class to be in HTB_CAN_SEND state */
Jiri Pirkob9a7afd2013-02-12 00:12:02 +00001899 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1900 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
Eric Dumazet5343a7f2013-06-04 07:11:48 +00001901 cl->mbuffer = 60ULL * NSEC_PER_SEC; /* 1min */
Eric Dumazetd2de8752014-08-22 18:32:09 -07001902 cl->t_c = ktime_get_ns();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001903 cl->cmode = HTB_CAN_SEND;
1904
1905 /* attach to the hash list and parent's family */
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001906 qdisc_class_hash_insert(&q->clhash, &cl->common);
Patrick McHardy42077592008-07-05 23:22:53 -07001907 if (parent)
1908 parent->children++;
Cong Wang11957be2018-09-07 13:29:14 -07001909 if (cl->leaf.q != &noop_qdisc)
1910 qdisc_hash_add(cl->leaf.q, true);
Patrick McHardyee39e102007-07-02 22:48:13 -07001911 } else {
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001912 if (tca[TCA_RATE]) {
John Fastabend22e0f8b2014-09-28 11:52:56 -07001913 err = gen_replace_estimator(&cl->bstats, NULL,
1914 &cl->rate_est,
Eric Dumazetedb09eb2016-06-06 09:37:16 -07001915 NULL,
1916 qdisc_root_sleeping_running(sch),
Stephen Hemminger71bcb092008-11-25 21:13:31 -08001917 tca[TCA_RATE]);
1918 if (err)
1919 return err;
1920 }
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001921
1922 if (q->offload) {
1923 struct net_device *dev = qdisc_dev(sch);
1924
1925 offload_opt = (struct tc_htb_qopt_offload) {
1926 .command = TC_HTB_NODE_MODIFY,
1927 .classid = cl->common.classid,
1928 .rate = max_t(u64, hopt->rate.rate, rate64),
1929 .ceil = max_t(u64, hopt->ceil.rate, ceil64),
1930 .extack = extack,
1931 };
1932 err = htb_offload(dev, &offload_opt);
1933 if (err)
1934 /* Estimator was replaced, and rollback may fail
1935 * as well, so we don't try to recover it, and
1936				 * the estimator won't work properly with the
1937 * offload anyway, because bstats are updated
1938 * only when the stats are queried.
1939 */
1940 return err;
1941 }
1942
Stephen Hemminger87990462006-08-10 23:35:16 -07001943 sch_tree_lock(sch);
Patrick McHardyee39e102007-07-02 22:48:13 -07001944 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945
Yang Yingliang1598f7c2013-12-10 14:59:28 +08001946 psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1947 psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1948
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949	/* There used to be a nasty bug here: we have to check that the node
Cong Wang11957be2018-09-07 13:29:14 -07001950	 * is really a leaf before changing cl->leaf!
Eric Dumazetcc7ec452011-01-19 19:26:56 +00001951 */
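	/* Worked example: with the default r2q of 10, a class rate of
	 * 12.5 Mbyte/s gives quantum = 1.25 Mbyte, which is clamped to
	 * 200000 bytes below (with a warning) unless an explicit quantum
	 * was supplied.
	 */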
Linus Torvalds1da177e2005-04-16 15:20:36 -07001952 if (!cl->level) {
Yang Yingliang1598f7c2013-12-10 14:59:28 +08001953 u64 quantum = cl->rate.rate_bytes_ps;
1954
1955 do_div(quantum, q->rate2quantum);
1956 cl->quantum = min_t(u64, quantum, INT_MAX);
1957
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001958 if (!hopt->quantum && cl->quantum < 1000) {
Li RongQingda01ec42018-03-30 10:11:21 +08001959 warn = -1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001960 cl->quantum = 1000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 }
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001962 if (!hopt->quantum && cl->quantum > 200000) {
Li RongQingda01ec42018-03-30 10:11:21 +08001963 warn = 1;
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001964 cl->quantum = 200000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001965 }
1966 if (hopt->quantum)
Jarek Poplawskic19f7a32008-12-03 21:09:45 -08001967 cl->quantum = hopt->quantum;
1968 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1969 cl->prio = TC_HTB_NUMPRIO - 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001970 }
1971
Jiri Pirko324f5aa2013-02-12 00:11:59 +00001972 cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
Vimalkumarf3ad8572013-09-10 17:36:37 -07001973 cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
Vimalkumar56b765b2012-10-31 06:04:11 +00001974
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975 sch_tree_unlock(sch);
Vlad Buslov4ce70b42019-09-24 18:51:16 +03001976 qdisc_put(parent_qdisc);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001977
Li RongQingda01ec42018-03-30 10:11:21 +08001978 if (warn)
1979 pr_warn("HTB: quantum of class %X is %s. Consider r2q change.\n",
1980 cl->common.classid, (warn == -1 ? "small" : "big"));
1981
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07001982 qdisc_class_hash_grow(sch, &q->clhash);
1983
Linus Torvalds1da177e2005-04-16 15:20:36 -07001984 *arg = (unsigned long)cl;
1985 return 0;
1986
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02001987err_kill_estimator:
1988 gen_kill_estimator(&cl->rate_est);
1989err_block_put:
1990 tcf_block_put(cl->block);
1991 kfree(cl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001992failure:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 return err;
1994}
1995
Alexander Aringcbaacc42017-12-20 12:35:16 -05001996static struct tcf_block *htb_tcf_block(struct Qdisc *sch, unsigned long arg,
1997 struct netlink_ext_ack *extack)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998{
1999 struct htb_sched *q = qdisc_priv(sch);
2000 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002001
Jiri Pirko6529eab2017-05-17 11:07:55 +02002002 return cl ? cl->block : q->block;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002003}
2004
2005static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
Stephen Hemminger87990462006-08-10 23:35:16 -07002006 u32 classid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007{
Stephen Hemminger87990462006-08-10 23:35:16 -07002008 struct htb_class *cl = htb_find(classid, sch);
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002009
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 /*if (cl && !cl->level) return 0;
Eric Dumazetcc7ec452011-01-19 19:26:56 +00002011 * The line above used to be there to prevent attaching filters to
2012	 * leaves. But at least the tc_index filter uses this just to get the
2013	 * class for other reasons, so we have to allow it.
2014 * ----
2015 * 19.6.2002 As Werner explained it is ok - bind filter is just
2016 * another way to "lock" the class - unlike "get" this lock can
2017 * be broken by class during destroy IIUC.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018 */
Stephen Hemminger87990462006-08-10 23:35:16 -07002019 if (cl)
2020 cl->filter_cnt++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021 return (unsigned long)cl;
2022}
2023
2024static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
2025{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 struct htb_class *cl = (struct htb_class *)arg;
Stephen Hemminger3bf72952006-08-10 23:31:08 -07002027
Stephen Hemminger87990462006-08-10 23:35:16 -07002028 if (cl)
2029 cl->filter_cnt--;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030}
2031
2032static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2033{
2034 struct htb_sched *q = qdisc_priv(sch);
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002035 struct htb_class *cl;
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002036 unsigned int i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
2038 if (arg->stop)
2039 return;
2040
Patrick McHardyf4c1f3e2008-07-05 23:22:35 -07002041 for (i = 0; i < q->clhash.hashsize; i++) {
Sasha Levinb67bfe02013-02-27 17:06:00 -08002042 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 if (arg->count < arg->skip) {
2044 arg->count++;
2045 continue;
2046 }
2047 if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
2048 arg->stop = 1;
2049 return;
2050 }
2051 arg->count++;
2052 }
2053 }
2054}
2055
Eric Dumazet20fea082007-11-14 01:44:41 -08002056static const struct Qdisc_class_ops htb_class_ops = {
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002057 .select_queue = htb_select_queue,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 .graft = htb_graft,
2059 .leaf = htb_leaf,
Patrick McHardy256d61b2006-11-29 17:37:05 -08002060 .qlen_notify = htb_qlen_notify,
WANG Cong143976c2017-08-24 16:51:29 -07002061 .find = htb_search,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 .change = htb_change_class,
2063 .delete = htb_delete,
2064 .walk = htb_walk,
Jiri Pirko6529eab2017-05-17 11:07:55 +02002065 .tcf_block = htb_tcf_block,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 .bind_tcf = htb_bind_filter,
2067 .unbind_tcf = htb_unbind_filter,
2068 .dump = htb_dump_class,
2069 .dump_stats = htb_dump_class_stats,
2070};
2071
Eric Dumazet20fea082007-11-14 01:44:41 -08002072static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073 .cl_ops = &htb_class_ops,
2074 .id = "htb",
2075 .priv_size = sizeof(struct htb_sched),
2076 .enqueue = htb_enqueue,
2077 .dequeue = htb_dequeue,
Jarek Poplawski77be1552008-10-31 00:47:01 -07002078 .peek = qdisc_peek_dequeued,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 .init = htb_init,
Maxim Mikityanskiyd03b1952021-01-19 14:08:13 +02002080 .attach = htb_attach,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081 .reset = htb_reset,
2082 .destroy = htb_destroy,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 .dump = htb_dump,
2084 .owner = THIS_MODULE,
2085};
2086
2087static int __init htb_module_init(void)
2088{
Stephen Hemminger87990462006-08-10 23:35:16 -07002089 return register_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090}
Stephen Hemminger87990462006-08-10 23:35:16 -07002091static void __exit htb_module_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092{
Stephen Hemminger87990462006-08-10 23:35:16 -07002093 unregister_qdisc(&htb_qdisc_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094}
Stephen Hemminger87990462006-08-10 23:35:16 -07002095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096module_init(htb_module_init)
2097module_exit(htb_module_exit)
2098MODULE_LICENSE("GPL");