// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_gred.c	Generic Random Early Detection queue.
 *
 * Authors:    J Hadi Salim (hadi@cyberus.ca) 1998-2002
 *
 *             991129: -  Bug fix with grio mode
 *		       - a better single AvgQ mode with Grio(WRED)
 *		       - A finer grained VQ dequeue based on suggestion
 *		         from Ren Liu
 *		       - More error checks
 *
 * For all the glorious comments look at include/net/red.h
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>
#include <net/red.h>

#define GRED_DEF_PRIO (MAX_DPs / 2)
#define GRED_VQ_MASK (MAX_DPs - 1)

#define GRED_VQ_RED_FLAGS	(TC_RED_ECN | TC_RED_HARDDROP)

struct gred_sched_data;
struct gred_sched;

struct gred_sched_data {
	u32		limit;		/* HARD maximal queue length	*/
	u32		DP;		/* the drop parameters */
	u32		red_flags;	/* virtualQ version of red_flags */
	u64		bytesin;	/* bytes seen on virtualQ so far*/
	u32		packetsin;	/* packets seen on virtualQ so far*/
	u32		backlog;	/* bytes on the virtualQ */
	u8		prio;		/* the prio of this vq */

	struct red_parms parms;
	struct red_vars  vars;
	struct red_stats stats;
};

enum {
	GRED_WRED_MODE = 1,
	GRED_RIO_MODE,
};

struct gred_sched {
	struct gred_sched_data *tab[MAX_DPs];
	unsigned long	flags;
	u32		red_flags;
	u32		DPs;
	u32		def;
	struct red_vars wred_set;
	struct tc_gred_qopt_offload *opt;
};

static inline int gred_wred_mode(struct gred_sched *table)
{
	return test_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_enable_wred_mode(struct gred_sched *table)
{
	__set_bit(GRED_WRED_MODE, &table->flags);
}

static inline void gred_disable_wred_mode(struct gred_sched *table)
{
	__clear_bit(GRED_WRED_MODE, &table->flags);
}

static inline int gred_rio_mode(struct gred_sched *table)
{
	return test_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_enable_rio_mode(struct gred_sched *table)
{
	__set_bit(GRED_RIO_MODE, &table->flags);
}

static inline void gred_disable_rio_mode(struct gred_sched *table)
{
	__clear_bit(GRED_RIO_MODE, &table->flags);
}

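/* WRED mode requires every configured virtual queue to have a unique
 * priority; returns 1 if any two DPs share a prio, 0 otherwise.
 */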
static inline int gred_wred_mode_check(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	/* Really ugly O(n^2) but shouldn't be needed too frequently. */
	for (i = 0; i < table->DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		int n;

		if (q == NULL)
			continue;

		for (n = i + 1; n < table->DPs; n++)
			if (table->tab[n] && table->tab[n]->prio == q->prio)
				return 1;
	}

	return 0;
}

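/* In WRED mode all virtual queues share the qdisc-wide backlog;
 * otherwise each virtual queue tracks its own.
 */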
static inline unsigned int gred_backlog(struct gred_sched *table,
					struct gred_sched_data *q,
					struct Qdisc *sch)
{
	if (gred_wred_mode(table))
		return sch->qstats.backlog;
	else
		return q->backlog;
}

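/* The low-order bits of skb->tc_index select the virtual queue (DP). */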
static inline u16 tc_index_to_dp(struct sk_buff *skb)
{
	return skb->tc_index & GRED_VQ_MASK;
}

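/* In WRED mode a single average-queue state is shared by all virtual
 * queues; these helpers copy it into and out of the per-VQ RED state.
 */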
static inline void gred_load_wred_set(const struct gred_sched *table,
				      struct gred_sched_data *q)
{
	q->vars.qavg = table->wred_set.qavg;
	q->vars.qidlestart = table->wred_set.qidlestart;
}

static inline void gred_store_wred_set(struct gred_sched *table,
				       struct gred_sched_data *q)
{
	table->wred_set.qavg = q->vars.qavg;
	table->wred_set.qidlestart = q->vars.qidlestart;
}

static int gred_use_ecn(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_ECN;
}

static int gred_use_harddrop(struct gred_sched_data *q)
{
	return q->red_flags & TC_RED_HARDDROP;
}

static bool gred_per_vq_red_flags_used(struct gred_sched *table)
{
	unsigned int i;

	/* Local per-vq flags couldn't have been set unless the global flags are 0 */
	if (table->red_flags)
		return false;
	for (i = 0; i < MAX_DPs; i++)
		if (table->tab[i] && table->tab[i]->red_flags)
			return true;
	return false;
}

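/* Classify the packet to a virtual queue via tc_index (falling back to
 * the default DP), update the RED average, then mark, drop or enqueue.
 */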
static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			struct sk_buff **to_free)
{
	struct gred_sched_data *q = NULL;
	struct gred_sched *t = qdisc_priv(sch);
	unsigned long qavg = 0;
	u16 dp = tc_index_to_dp(skb);

	if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
		dp = t->def;

		q = t->tab[dp];
		if (!q) {
			/* Pass through packets not assigned to a DP
			 * if no default DP has been configured. This
			 * allows for DP flows to be left untouched.
			 */
			if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <=
					sch->limit))
				return qdisc_enqueue_tail(skb, sch);
			else
				goto drop;
		}

		/* Fix tc_index? Could be controversial, but it is needed
		 * for requeueing.
		 */
		skb->tc_index = (skb->tc_index & ~GRED_VQ_MASK) | dp;
	}

	/* sum up all the qaves of prios < ours to get the new qave */
	if (!gred_wred_mode(t) && gred_rio_mode(t)) {
		int i;

		for (i = 0; i < t->DPs; i++) {
			if (t->tab[i] && t->tab[i]->prio < q->prio &&
			    !red_is_idling(&t->tab[i]->vars))
				qavg += t->tab[i]->vars.qavg;
		}
	}

	q->packetsin++;
	q->bytesin += qdisc_pkt_len(skb);

	if (gred_wred_mode(t))
		gred_load_wred_set(t, q);

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     gred_backlog(t, q, sch));

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	if (gred_wred_mode(t))
		gred_store_wred_set(t, q);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg + qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!gred_use_ecn(q) || !INET_ECN_set_ce(skb)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		q->stats.prob_mark++;
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (gred_use_harddrop(q) || !gred_use_ecn(q) ||
		    !INET_ECN_set_ce(skb)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}
		q->stats.forced_mark++;
		break;
	}

	if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
		q->backlog += qdisc_pkt_len(skb);
		return qdisc_enqueue_tail(skb, sch);
	}

	q->stats.pdrop++;
drop:
	return qdisc_drop(skb, sch, to_free);

congestion_drop:
	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

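/* Dequeue from the shared FIFO and charge the packet back to the
 * virtual queue it was accounted to; the start of an idle period is
 * recorded when the relevant backlog drains to zero.
 */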
static struct sk_buff *gred_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct gred_sched *t = qdisc_priv(sch);

	skb = qdisc_dequeue_head(sch);

	if (skb) {
		struct gred_sched_data *q;
		u16 dp = tc_index_to_dp(skb);

		if (dp >= t->DPs || (q = t->tab[dp]) == NULL) {
			net_warn_ratelimited("GRED: Unable to relocate VQ 0x%x after dequeue, screwing up backlog\n",
					     tc_index_to_dp(skb));
		} else {
			q->backlog -= qdisc_pkt_len(skb);

			if (gred_wred_mode(t)) {
				if (!sch->qstats.backlog)
					red_start_of_idle_period(&t->wred_set);
			} else {
				if (!q->backlog)
					red_start_of_idle_period(&q->vars);
			}
		}

		return skb;
	}

	return NULL;
}

static void gred_reset(struct Qdisc *sch)
{
	int i;
	struct gred_sched *t = qdisc_priv(sch);

	qdisc_reset_queue(sch);

	for (i = 0; i < t->DPs; i++) {
		struct gred_sched_data *q = t->tab[i];

		if (!q)
			continue;

		red_restart(&q->vars);
		q->backlog = 0;
	}
}

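/* Mirror the software configuration into hardware, when the underlying
 * device supports GRED offload via ndo_setup_tc().
 */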
static void gred_offload(struct Qdisc *sch, enum tc_gred_command command)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_gred_qopt_offload *opt = table->opt;

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return;

	memset(opt, 0, sizeof(*opt));
	opt->command = command;
	opt->handle = sch->handle;
	opt->parent = sch->parent;

	if (command == TC_GRED_REPLACE) {
		unsigned int i;

		opt->set.grio_on = gred_rio_mode(table);
		opt->set.wred_on = gred_wred_mode(table);
		opt->set.dp_cnt = table->DPs;
		opt->set.dp_def = table->def;

		for (i = 0; i < table->DPs; i++) {
			struct gred_sched_data *q = table->tab[i];

			if (!q)
				continue;
			opt->set.tab[i].present = true;
			opt->set.tab[i].limit = q->limit;
			opt->set.tab[i].prio = q->prio;
			opt->set.tab[i].min = q->parms.qth_min >> q->parms.Wlog;
			opt->set.tab[i].max = q->parms.qth_max >> q->parms.Wlog;
			opt->set.tab[i].is_ecn = gred_use_ecn(q);
			opt->set.tab[i].is_harddrop = gred_use_harddrop(q);
			opt->set.tab[i].probability = q->parms.max_P;
			opt->set.tab[i].backlog = &q->backlog;
		}
		opt->set.qstats = &sch->qstats;
	}

	dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_GRED, opt);
}

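/* Fold statistics reported by the offloading driver back into the
 * software counters before a dump.
 */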
static int gred_offload_dump_stats(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt_offload *hw_stats;
	u64 bytes = 0, packets = 0;
	unsigned int i;
	int ret;

	hw_stats = kzalloc(sizeof(*hw_stats), GFP_KERNEL);
	if (!hw_stats)
		return -ENOMEM;

	hw_stats->command = TC_GRED_STATS;
	hw_stats->handle = sch->handle;
	hw_stats->parent = sch->parent;

	for (i = 0; i < MAX_DPs; i++) {
		gnet_stats_basic_sync_init(&hw_stats->stats.bstats[i]);
		if (table->tab[i])
			hw_stats->stats.xstats[i] = &table->tab[i]->stats;
	}

	ret = qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_GRED, hw_stats);
	/* Even if the driver returns failure, adjust the stats - the
	 * offload may have ended, but the driver may still want to
	 * adjust the values.
	 */
	for (i = 0; i < MAX_DPs; i++) {
		if (!table->tab[i])
			continue;
		table->tab[i]->packetsin += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		table->tab[i]->bytesin += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		table->tab[i]->backlog += hw_stats->stats.qstats[i].backlog;

		bytes += u64_stats_read(&hw_stats->stats.bstats[i].bytes);
		packets += u64_stats_read(&hw_stats->stats.bstats[i].packets);
		sch->qstats.qlen += hw_stats->stats.qstats[i].qlen;
		sch->qstats.backlog += hw_stats->stats.qstats[i].backlog;
		sch->qstats.drops += hw_stats->stats.qstats[i].drops;
		sch->qstats.requeues += hw_stats->stats.qstats[i].requeues;
		sch->qstats.overlimits += hw_stats->stats.qstats[i].overlimits;
	}
	_bstats_update(&sch->bstats, bytes, packets);

	kfree(hw_stats);
	return ret;
}

static inline void gred_destroy_vq(struct gred_sched_data *q)
{
	kfree(q);
}

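/* Parse TCA_GRED_DPS: set the number of virtual queues, the default DP
 * and the global RED flags, and destroy any now-shadowed VQs.
 */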
static int gred_change_table_def(struct Qdisc *sch, struct nlattr *dps,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_sopt *sopt;
	bool red_flags_changed;
	int i;

	if (!dps)
		return -EINVAL;

	sopt = nla_data(dps);

	if (sopt->DPs > MAX_DPs) {
		NL_SET_ERR_MSG_MOD(extack, "number of virtual queues too high");
		return -EINVAL;
	}
	if (sopt->DPs == 0) {
		NL_SET_ERR_MSG_MOD(extack,
				   "number of virtual queues can't be 0");
		return -EINVAL;
	}
	if (sopt->def_DP >= sopt->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "default virtual queue above virtual queue count");
		return -EINVAL;
	}
	if (sopt->flags && gred_per_vq_red_flags_used(table)) {
		NL_SET_ERR_MSG_MOD(extack, "can't set per-Qdisc RED flags when per-virtual queue flags are used");
		return -EINVAL;
	}

	sch_tree_lock(sch);
	table->DPs = sopt->DPs;
	table->def = sopt->def_DP;
	red_flags_changed = table->red_flags != sopt->flags;
	table->red_flags = sopt->flags;

	/*
	 * Every entry point to GRED is synchronized with the above code
	 * and the DP is checked against DPs, i.e. shadowed VQs can no
	 * longer be found so we can unlock right here.
	 */
	sch_tree_unlock(sch);

	if (sopt->grio) {
		gred_enable_rio_mode(table);
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	} else {
		gred_disable_rio_mode(table);
		gred_disable_wred_mode(table);
	}

	if (red_flags_changed)
		for (i = 0; i < table->DPs; i++)
			if (table->tab[i])
				table->tab[i]->red_flags =
					table->red_flags & GRED_VQ_RED_FLAGS;

	for (i = table->DPs; i < MAX_DPs; i++) {
		if (table->tab[i]) {
			pr_warn("GRED: Warning: Destroying shadowed VQ 0x%x\n",
				i);
			gred_destroy_vq(table->tab[i]);
			table->tab[i] = NULL;
		}
	}

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;
}

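/* (Re)configure a single virtual queue, allocating it from *prealloc
 * on first use; called under the qdisc tree lock.
 */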
static inline int gred_change_vq(struct Qdisc *sch, int dp,
				 struct tc_gred_qopt *ctl, int prio,
				 u8 *stab, u32 max_P,
				 struct gred_sched_data **prealloc,
				 struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct gred_sched_data *q = table->tab[dp];

	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Scell_log, stab)) {
		NL_SET_ERR_MSG_MOD(extack, "invalid RED parameters");
		return -EINVAL;
	}

	if (!q) {
		table->tab[dp] = q = *prealloc;
		*prealloc = NULL;
		if (!q)
			return -ENOMEM;
		q->red_flags = table->red_flags & GRED_VQ_RED_FLAGS;
	}

	q->DP = dp;
	q->prio = prio;
	if (ctl->limit > sch->limit)
		q->limit = sch->limit;
	else
		q->limit = ctl->limit;

	if (q->backlog == 0)
		red_end_of_idle_period(&q->vars);

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog,
		      ctl->Scell_log, stab, max_P);
	red_set_vars(&q->vars);
	return 0;
}

static const struct nla_policy gred_vq_policy[TCA_GRED_VQ_MAX + 1] = {
	[TCA_GRED_VQ_DP]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_FLAGS]	= { .type = NLA_U32 },
};

static const struct nla_policy gred_vqe_policy[TCA_GRED_VQ_ENTRY_MAX + 1] = {
	[TCA_GRED_VQ_ENTRY]	= { .type = NLA_NESTED },
};

static const struct nla_policy gred_policy[TCA_GRED_MAX + 1] = {
	[TCA_GRED_PARMS]	= { .len = sizeof(struct tc_gred_qopt) },
	[TCA_GRED_STAB]		= { .len = 256 },
	[TCA_GRED_DPS]		= { .len = sizeof(struct tc_gred_sopt) },
	[TCA_GRED_MAX_P]	= { .type = NLA_U32 },
	[TCA_GRED_LIMIT]	= { .type = NLA_U32 },
	[TCA_GRED_VQ_LIST]	= { .type = NLA_NESTED },
};

static void gred_vq_apply(struct gred_sched *table, const struct nlattr *entry)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	u32 dp;

	nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
				    gred_vq_policy, NULL);

	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);

	if (tb[TCA_GRED_VQ_FLAGS])
		table->tab[dp]->red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);
}

static void gred_vqs_apply(struct gred_sched *table, struct nlattr *vqs)
{
	const struct nlattr *attr;
	int rem;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			gred_vq_apply(table, attr);
			break;
		}
	}
}

static int gred_vq_validate(struct gred_sched *table, u32 cdp,
			    const struct nlattr *entry,
			    struct netlink_ext_ack *extack)
{
	struct nlattr *tb[TCA_GRED_VQ_MAX + 1];
	int err;
	u32 dp;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_VQ_MAX, entry,
					  gred_vq_policy, extack);
	if (err < 0)
		return err;

	if (!tb[TCA_GRED_VQ_DP]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with no index specified");
		return -EINVAL;
	}
	dp = nla_get_u32(tb[TCA_GRED_VQ_DP]);
	if (dp >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue with index out of bounds");
		return -EINVAL;
	}
	if (dp != cdp && !table->tab[dp]) {
		NL_SET_ERR_MSG_MOD(extack, "Virtual queue not yet instantiated");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_FLAGS]) {
		u32 red_flags = nla_get_u32(tb[TCA_GRED_VQ_FLAGS]);

		if (table->red_flags && table->red_flags != red_flags) {
			NL_SET_ERR_MSG_MOD(extack, "can't change per-virtual queue RED flags when per-Qdisc flags are used");
			return -EINVAL;
		}
		if (red_flags & ~GRED_VQ_RED_FLAGS) {
			NL_SET_ERR_MSG_MOD(extack,
					   "invalid RED flags specified");
			return -EINVAL;
		}
	}

	return 0;
}

static int gred_vqs_validate(struct gred_sched *table, u32 cdp,
			     struct nlattr *vqs, struct netlink_ext_ack *extack)
{
	const struct nlattr *attr;
	int rem, err;

	err = nla_validate_nested_deprecated(vqs, TCA_GRED_VQ_ENTRY_MAX,
					     gred_vqe_policy, extack);
	if (err < 0)
		return err;

	nla_for_each_nested(attr, vqs, rem) {
		switch (nla_type(attr)) {
		case TCA_GRED_VQ_ENTRY:
			err = gred_vq_validate(table, cdp, attr, extack);
			if (err)
				return err;
			break;
		default:
			NL_SET_ERR_MSG_MOD(extack, "GRED_VQ_LIST can contain only entry attributes");
			return -EINVAL;
		}
	}

	if (rem > 0) {
		NL_SET_ERR_MSG_MOD(extack, "Trailing data after parsing virtual queue list");
		return -EINVAL;
	}

	return 0;
}

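/* Handle a netlink change request: either a table-level update
 * (TCA_GRED_DPS) or the configuration of one virtual queue.
 */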
static int gred_change(struct Qdisc *sch, struct nlattr *opt,
		       struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct tc_gred_qopt *ctl;
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err, prio = GRED_DEF_PRIO;
	u8 *stab;
	u32 max_P;
	struct gred_sched_data *prealloc;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] == NULL && tb[TCA_GRED_STAB] == NULL) {
		if (tb[TCA_GRED_LIMIT] != NULL)
			sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
		return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
	}

	if (tb[TCA_GRED_PARMS] == NULL ||
	    tb[TCA_GRED_STAB] == NULL ||
	    tb[TCA_GRED_LIMIT] != NULL) {
		NL_SET_ERR_MSG_MOD(extack, "can't configure Qdisc and virtual queue at the same time");
		return -EINVAL;
	}

	max_P = tb[TCA_GRED_MAX_P] ? nla_get_u32(tb[TCA_GRED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_GRED_PARMS]);
	stab = nla_data(tb[TCA_GRED_STAB]);

	if (ctl->DP >= table->DPs) {
		NL_SET_ERR_MSG_MOD(extack, "virtual queue index above virtual queue count");
		return -EINVAL;
	}

	if (tb[TCA_GRED_VQ_LIST]) {
		err = gred_vqs_validate(table, ctl->DP, tb[TCA_GRED_VQ_LIST],
					extack);
		if (err)
			return err;
	}

	if (gred_rio_mode(table)) {
		if (ctl->prio == 0) {
			int def_prio = GRED_DEF_PRIO;

			if (table->tab[table->def])
				def_prio = table->tab[table->def]->prio;

			printk(KERN_DEBUG "GRED: DP %u does not have a prio, "
			       "setting default to %d\n", ctl->DP, def_prio);

			prio = def_prio;
		} else
			prio = ctl->prio;
	}

	prealloc = kzalloc(sizeof(*prealloc), GFP_KERNEL);
	sch_tree_lock(sch);

	err = gred_change_vq(sch, ctl->DP, ctl, prio, stab, max_P, &prealloc,
			     extack);
	if (err < 0)
		goto err_unlock_free;

	if (tb[TCA_GRED_VQ_LIST])
		gred_vqs_apply(table, tb[TCA_GRED_VQ_LIST]);

	if (gred_rio_mode(table)) {
		gred_disable_wred_mode(table);
		if (gred_wred_mode_check(sch))
			gred_enable_wred_mode(table);
	}

	sch_tree_unlock(sch);
	kfree(prealloc);

	gred_offload(sch, TC_GRED_REPLACE);
	return 0;

err_unlock_free:
	sch_tree_unlock(sch);
	kfree(prealloc);
	return err;
}

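/* Qdisc setup: only table-level parameters may be given at init time;
 * individual virtual queues are configured later via gred_change().
 */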
static int gred_init(struct Qdisc *sch, struct nlattr *opt,
		     struct netlink_ext_ack *extack)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *tb[TCA_GRED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_GRED_MAX, opt, gred_policy,
					  extack);
	if (err < 0)
		return err;

	if (tb[TCA_GRED_PARMS] || tb[TCA_GRED_STAB]) {
		NL_SET_ERR_MSG_MOD(extack,
				   "virtual queue configuration can't be specified at initialization time");
		return -EINVAL;
	}

	if (tb[TCA_GRED_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
	else
		sch->limit = qdisc_dev(sch)->tx_queue_len
			     * psched_mtu(qdisc_dev(sch));

	if (qdisc_dev(sch)->netdev_ops->ndo_setup_tc) {
		table->opt = kzalloc(sizeof(*table->opt), GFP_KERNEL);
		if (!table->opt)
			return -ENOMEM;
	}

	return gred_change_table_def(sch, tb[TCA_GRED_DPS], extack);
}

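/* Dump configuration and per-VQ statistics, both in the legacy
 * all-in-one TCA_GRED_PARMS format and as a structured VQ list.
 */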
static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct gred_sched *table = qdisc_priv(sch);
	struct nlattr *parms, *vqs, *opts = NULL;
	int i;
	u32 max_p[MAX_DPs];
	struct tc_gred_sopt sopt = {
		.DPs	= table->DPs,
		.def_DP	= table->def,
		.grio	= gred_rio_mode(table),
		.flags	= table->red_flags,
	};

	if (gred_offload_dump_stats(sch))
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_GRED_DPS, sizeof(sopt), &sopt))
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];

		max_p[i] = q ? q->parms.max_P : 0;
	}
	if (nla_put(skb, TCA_GRED_MAX_P, sizeof(max_p), max_p))
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_GRED_LIMIT, sch->limit))
		goto nla_put_failure;

	/* Old style all-in-one dump of VQs */
	parms = nla_nest_start_noflag(skb, TCA_GRED_PARMS);
	if (parms == NULL)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct tc_gred_qopt opt;
		unsigned long qavg;

		memset(&opt, 0, sizeof(opt));

		if (!q) {
			/* hack -- fix at some point with proper message
			   This is how we indicate to tc that there is no VQ
			   at this DP */

			opt.DP = MAX_DPs + i;
			goto append_opt;
		}

		opt.limit	= q->limit;
		opt.DP		= q->DP;
		opt.backlog	= gred_backlog(table, q, sch);
		opt.prio	= q->prio;
		opt.qth_min	= q->parms.qth_min >> q->parms.Wlog;
		opt.qth_max	= q->parms.qth_max >> q->parms.Wlog;
		opt.Wlog	= q->parms.Wlog;
		opt.Plog	= q->parms.Plog;
		opt.Scell_log	= q->parms.Scell_log;
		opt.other	= q->stats.other;
		opt.early	= q->stats.prob_drop;
		opt.forced	= q->stats.forced_drop;
		opt.pdrop	= q->stats.pdrop;
		opt.packets	= q->packetsin;
		opt.bytesin	= q->bytesin;

		if (gred_wred_mode(table))
			gred_load_wred_set(table, q);

		qavg = red_calc_qavg(&q->parms, &q->vars,
				     q->vars.qavg >> q->parms.Wlog);
		opt.qave = qavg >> q->parms.Wlog;

append_opt:
		if (nla_append(skb, sizeof(opt), &opt) < 0)
			goto nla_put_failure;
	}

	nla_nest_end(skb, parms);

	/* Dump the VQs again, in more structured way */
	vqs = nla_nest_start_noflag(skb, TCA_GRED_VQ_LIST);
	if (!vqs)
		goto nla_put_failure;

	for (i = 0; i < MAX_DPs; i++) {
		struct gred_sched_data *q = table->tab[i];
		struct nlattr *vq;

		if (!q)
			continue;

		vq = nla_nest_start_noflag(skb, TCA_GRED_VQ_ENTRY);
		if (!vq)
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_DP, q->DP))
			goto nla_put_failure;

		if (nla_put_u32(skb, TCA_GRED_VQ_FLAGS, q->red_flags))
			goto nla_put_failure;

		/* Stats */
		if (nla_put_u64_64bit(skb, TCA_GRED_VQ_STAT_BYTES, q->bytesin,
				      TCA_GRED_VQ_PAD))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PACKETS, q->packetsin))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_BACKLOG,
				gred_backlog(table, q, sch)))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_DROP,
				q->stats.prob_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PROB_MARK,
				q->stats.prob_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_DROP,
				q->stats.forced_drop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_FORCED_MARK,
				q->stats.forced_mark))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_PDROP, q->stats.pdrop))
			goto nla_put_failure;
		if (nla_put_u32(skb, TCA_GRED_VQ_STAT_OTHER, q->stats.other))
			goto nla_put_failure;

		nla_nest_end(skb, vq);
	}
	nla_nest_end(skb, vqs);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

static void gred_destroy(struct Qdisc *sch)
{
	struct gred_sched *table = qdisc_priv(sch);
	int i;

	for (i = 0; i < table->DPs; i++) {
		if (table->tab[i])
			gred_destroy_vq(table->tab[i]);
	}
	gred_offload(sch, TC_GRED_DESTROY);
	kfree(table->opt);
}

static struct Qdisc_ops gred_qdisc_ops __read_mostly = {
	.id		=	"gred",
	.priv_size	=	sizeof(struct gred_sched),
	.enqueue	=	gred_enqueue,
	.dequeue	=	gred_dequeue,
	.peek		=	qdisc_peek_head,
	.init		=	gred_init,
	.reset		=	gred_reset,
	.destroy	=	gred_destroy,
	.change		=	gred_change,
	.dump		=	gred_dump,
	.owner		=	THIS_MODULE,
};

static int __init gred_module_init(void)
{
	return register_qdisc(&gred_qdisc_ops);
}

static void __exit gred_module_exit(void)
{
	unregister_qdisc(&gred_qdisc_ops);
}

module_init(gred_module_init)
module_exit(gred_module_exit)

MODULE_LICENSE("GPL");