// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_red.c	Random Early Detection queue.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * J Hadi Salim 980914:	computation fixes
 * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly.
 * J Hadi Salim 980816:  ECN support
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/inet_ecn.h>
#include <net/red.h>


/*	Parameters, settable by user:
	-----------------------------

	limit		- bytes (must be > qth_max + burst)

	Hard limit on queue length, should be chosen >qth_max
	to allow packet bursts. This parameter does not
	affect the algorithm's behaviour and can be chosen
	arbitrarily high (well, less than RAM size).
	Really, this limit will never be reached
	if RED works correctly.
 */
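
/* A typical software configuration, for illustration only (option names
 * follow the tc-red(8) syntax shipped with iproute2; the numbers are
 * arbitrary and would be tuned to the link):
 *
 *   tc qdisc add dev eth0 root red limit 400000 min 30000 max 90000 \
 *	avpkt 1000 burst 55 bandwidth 10mbit probability 0.02 ecn adaptive
 *
 * min/max/limit are byte counts; "ecn" marks ECT packets instead of
 * dropping them, and "adaptive" arms the max_P adjustment timer below.
 */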

struct red_sched_data {
	u32			limit;		/* HARD maximal queue length */

	unsigned char		flags;
	/* Non-flags in tc_red_qopt.flags. */
	unsigned char		userbits;

	struct timer_list	adapt_timer;
	struct Qdisc		*sch;
	struct red_parms	parms;
	struct red_vars		vars;
	struct red_stats	stats;
	struct Qdisc		*qdisc;
	struct tcf_qevent	qe_early_drop;
	struct tcf_qevent	qe_mark;
};

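/* Flags the qdisc accepts from user space: the historic bits that fit
 * in tc_red_qopt.flags (ECN, HARDDROP, ADAPTATIVE) plus TC_RED_NODROP,
 * which is only expressible through the TCA_RED_FLAGS bitfield32
 * attribute.
 */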
#define TC_RED_SUPPORTED_FLAGS	(TC_RED_HISTORIC_FLAGS | TC_RED_NODROP)

static inline int red_use_ecn(struct red_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

static inline int red_use_harddrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}

static int red_use_nodrop(struct red_sched_data *q)
{
	return q->flags & TC_RED_NODROP;
}

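/* Enqueue: recompute the average queue size from the child's backlog,
 * then act on red_action()'s verdict.  A PROB/HARD mark either sets CE
 * on an ECT packet (when ECN is enabled) or drops; in nodrop mode a
 * non-ECT packet is queued rather than dropped.  The qe_mark and
 * qe_early_drop qevent blocks may consume the packet, in which case
 * NET_XMIT_CN is returned.
 */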
static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch,
		       struct sk_buff **to_free)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;
	int ret;

	q->vars.qavg = red_calc_qavg(&q->parms,
				     &q->vars,
				     child->qstats.backlog);

	if (red_is_idling(&q->vars))
		red_end_of_idle_period(&q->vars);

	switch (red_action(&q->parms, &q->vars, q->vars.qavg)) {
	case RED_DONT_MARK:
		break;

	case RED_PROB_MARK:
		qdisc_qstats_overlimit(sch);
		if (!red_use_ecn(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.prob_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.prob_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;

	case RED_HARD_MARK:
		qdisc_qstats_overlimit(sch);
		if (red_use_harddrop(q) || !red_use_ecn(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		if (INET_ECN_set_ce(skb)) {
			q->stats.forced_mark++;
			skb = tcf_qevent_handle(&q->qe_mark, sch, skb, to_free, &ret);
			if (!skb)
				return NET_XMIT_CN | ret;
		} else if (!red_use_nodrop(q)) {
			q->stats.forced_drop++;
			goto congestion_drop;
		}

		/* Non-ECT packet in ECN nodrop mode: queue it. */
		break;
	}

	ret = qdisc_enqueue(skb, child, to_free);
	if (likely(ret == NET_XMIT_SUCCESS)) {
		qdisc_qstats_backlog_inc(sch, skb);
		sch->q.qlen++;
	} else if (net_xmit_drop_count(ret)) {
		q->stats.pdrop++;
		qdisc_qstats_drop(sch);
	}
	return ret;

congestion_drop:
	skb = tcf_qevent_handle(&q->qe_early_drop, sch, skb, to_free, &ret);
	if (!skb)
		return NET_XMIT_CN | ret;

	qdisc_drop(skb, sch, to_free);
	return NET_XMIT_CN;
}

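/* Dequeue from the child; if it runs empty, open an idle period so the
 * average queue size keeps decaying while the link has nothing to send.
 */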
static struct sk_buff *red_dequeue(struct Qdisc *sch)
{
	struct sk_buff *skb;
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	skb = child->dequeue(child);
	if (skb) {
		qdisc_bstats_update(sch, skb);
		qdisc_qstats_backlog_dec(sch, skb);
		sch->q.qlen--;
	} else {
		if (!red_is_idling(&q->vars))
			red_start_of_idle_period(&q->vars);
	}
	return skb;
}

static struct sk_buff *red_peek(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct Qdisc *child = q->qdisc;

	return child->ops->peek(child);
}

static void red_reset(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	qdisc_reset(q->qdisc);
	sch->qstats.backlog = 0;
	sch->q.qlen = 0;
	red_restart(&q->vars);
}

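/* Push the current parameters to (or remove them from) the NIC.
 * qth_min/qth_max are kept internally scaled by Wlog, so shift them
 * back down to byte values before handing them to the driver.
 */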
static int red_offload(struct Qdisc *sch, bool enable)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_qopt_offload opt = {
		.handle = sch->handle,
		.parent = sch->parent,
	};

	if (!tc_can_offload(dev) || !dev->netdev_ops->ndo_setup_tc)
		return -EOPNOTSUPP;

	if (enable) {
		opt.command = TC_RED_REPLACE;
		opt.set.min = q->parms.qth_min >> q->parms.Wlog;
		opt.set.max = q->parms.qth_max >> q->parms.Wlog;
		opt.set.probability = q->parms.max_P;
		opt.set.limit = q->limit;
		opt.set.is_ecn = red_use_ecn(q);
		opt.set.is_harddrop = red_use_harddrop(q);
		opt.set.is_nodrop = red_use_nodrop(q);
		opt.set.qstats = &sch->qstats;
	} else {
		opt.command = TC_RED_DESTROY;
	}

	return dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED, &opt);
}

static void red_destroy(struct Qdisc *sch)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcf_qevent_destroy(&q->qe_mark, sch);
	tcf_qevent_destroy(&q->qe_early_drop, sch);
	del_timer_sync(&q->adapt_timer);
	red_offload(sch, false);
	qdisc_put(q->qdisc);
}

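/* The strict_start_type marker on TCA_RED_UNSPEC makes TCA_RED_FLAGS
 * and later attributes subject to strict validation even though the
 * nest itself is parsed with nla_parse_nested_deprecated().
 */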
static const struct nla_policy red_policy[TCA_RED_MAX + 1] = {
	[TCA_RED_UNSPEC] = { .strict_start_type = TCA_RED_FLAGS },
	[TCA_RED_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_RED_STAB]	= { .len = RED_STAB_SIZE },
	[TCA_RED_MAX_P] = { .type = NLA_U32 },
	[TCA_RED_FLAGS] = NLA_POLICY_BITFIELD32(TC_RED_SUPPORTED_FLAGS),
	[TCA_RED_EARLY_DROP_BLOCK] = { .type = NLA_U32 },
	[TCA_RED_MARK_BLOCK] = { .type = NLA_U32 },
};

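/* Common configuration path for red_init() and red_change(): validate
 * the parameters and stab table, merge the flags bitfield into the
 * current flags, and, if a limit was supplied, swap in a fresh bfifo
 * child under the tree lock, releasing the old child only after the
 * lock is dropped.
 */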
static int __red_change(struct Qdisc *sch, struct nlattr **tb,
			struct netlink_ext_ack *extack)
{
	struct Qdisc *old_child = NULL, *child = NULL;
	struct red_sched_data *q = qdisc_priv(sch);
	struct nla_bitfield32 flags_bf;
	struct tc_red_qopt *ctl;
	unsigned char userbits;
	unsigned char flags;
	int err;
	u32 max_P;
	u8 *stab;

	if (tb[TCA_RED_PARMS] == NULL ||
	    tb[TCA_RED_STAB] == NULL)
		return -EINVAL;

	max_P = tb[TCA_RED_MAX_P] ? nla_get_u32(tb[TCA_RED_MAX_P]) : 0;

	ctl = nla_data(tb[TCA_RED_PARMS]);
	stab = nla_data(tb[TCA_RED_STAB]);
	if (!red_check_params(ctl->qth_min, ctl->qth_max, ctl->Wlog,
			      ctl->Scell_log, stab))
		return -EINVAL;

	err = red_get_flags(ctl->flags, TC_RED_HISTORIC_FLAGS,
			    tb[TCA_RED_FLAGS], TC_RED_SUPPORTED_FLAGS,
			    &flags_bf, &userbits, extack);
	if (err)
		return err;

	if (ctl->limit > 0) {
		child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
					 extack);
		if (IS_ERR(child))
			return PTR_ERR(child);

		/* child is fifo, no need to check for noop_qdisc */
		qdisc_hash_add(child, true);
	}

	sch_tree_lock(sch);

	flags = (q->flags & ~flags_bf.selector) | flags_bf.value;
	err = red_validate_flags(flags, extack);
	if (err)
		goto unlock_out;

	q->flags = flags;
	q->userbits = userbits;
	q->limit = ctl->limit;
	if (child) {
		qdisc_tree_flush_backlog(q->qdisc);
		old_child = q->qdisc;
		q->qdisc = child;
	}

	red_set_parms(&q->parms,
		      ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      stab,
		      max_P);
	red_set_vars(&q->vars);

	del_timer(&q->adapt_timer);
	if (ctl->flags & TC_RED_ADAPTATIVE)
		mod_timer(&q->adapt_timer, jiffies + HZ/2);

	if (!q->qdisc->q.qlen)
		red_start_of_idle_period(&q->vars);

	sch_tree_unlock(sch);

	red_offload(sch, true);

	if (old_child)
		qdisc_put(old_child);
	return 0;

unlock_out:
	sch_tree_unlock(sch);
	if (child)
		qdisc_put(child);
	return err;
}

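/* While TC_RED_ADAPTATIVE is set this timer fires roughly twice a
 * second, letting red_adaptative_algo() adjust max_P so the average
 * queue size tracks the configured thresholds.  Runs under the root
 * qdisc lock.
 */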
static inline void red_adaptative_timer(struct timer_list *t)
{
	struct red_sched_data *q = from_timer(q, t, adapt_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));

	spin_lock(root_lock);
	red_adaptative_algo(&q->parms, &q->vars);
	mod_timer(&q->adapt_timer, jiffies + HZ/2);
	spin_unlock(root_lock);
}

static int red_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	q->qdisc = &noop_qdisc;
	q->sch = sch;
	timer_setup(&q->adapt_timer, red_adaptative_timer, 0);

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = __red_change(sch, tb, extack);
	if (err)
		return err;

	err = tcf_qevent_init(&q->qe_early_drop, sch,
			      FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP,
			      tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	return tcf_qevent_init(&q->qe_mark, sch,
			       FLOW_BLOCK_BINDER_TYPE_RED_MARK,
			       tb[TCA_RED_MARK_BLOCK], extack);
}

static int red_change(struct Qdisc *sch, struct nlattr *opt,
		      struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_RED_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_RED_MAX, opt, red_policy,
					  extack);
	if (err < 0)
		return err;

	err = tcf_qevent_validate_change(&q->qe_early_drop,
					 tb[TCA_RED_EARLY_DROP_BLOCK], extack);
	if (err)
		return err;

	err = tcf_qevent_validate_change(&q->qe_mark,
					 tb[TCA_RED_MARK_BLOCK], extack);
	if (err)
		return err;

	return __red_change(sch, tb, extack);
}

static int red_dump_offload_stats(struct Qdisc *sch)
{
	struct tc_red_qopt_offload hw_stats = {
		.command = TC_RED_STATS,
		.handle = sch->handle,
		.parent = sch->parent,
		{
			.stats.bstats = &sch->bstats,
			.stats.qstats = &sch->qstats,
		},
	};

	return qdisc_offload_dump_helper(sch, TC_SETUP_QDISC_RED, &hw_stats);
}

static int red_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= (q->flags & TC_RED_HISTORIC_FLAGS) |
				  q->userbits,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};
	int err;

	err = red_dump_offload_stats(sch);
	if (err)
		goto nla_put_failure;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put(skb, TCA_RED_PARMS, sizeof(opt), &opt) ||
	    nla_put_u32(skb, TCA_RED_MAX_P, q->parms.max_P) ||
	    nla_put_bitfield32(skb, TCA_RED_FLAGS,
			       q->flags, TC_RED_SUPPORTED_FLAGS) ||
	    tcf_qevent_dump(skb, TCA_RED_MARK_BLOCK, &q->qe_mark) ||
	    tcf_qevent_dump(skb, TCA_RED_EARLY_DROP_BLOCK, &q->qe_early_drop))
		goto nla_put_failure;
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}

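/* For an offloaded instance, ask the driver to fold its hardware
 * counters into q->stats (TC_RED_XSTATS) before the totals are copied
 * to user space.
 */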
static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct red_sched_data *q = qdisc_priv(sch);
	struct net_device *dev = qdisc_dev(sch);
	struct tc_red_xstats st = {0};

	if (sch->flags & TCQ_F_OFFLOADED) {
		struct tc_red_qopt_offload hw_stats_request = {
			.command = TC_RED_XSTATS,
			.handle = sch->handle,
			.parent = sch->parent,
			{
				.xstats = &q->stats,
			},
		};
		dev->netdev_ops->ndo_setup_tc(dev, TC_SETUP_QDISC_RED,
					      &hw_stats_request);
	}
	st.early = q->stats.prob_drop + q->stats.forced_drop;
	st.pdrop = q->stats.pdrop;
	st.other = q->stats.other;
	st.marked = q->stats.prob_mark + q->stats.forced_mark;

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static int red_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	struct red_sched_data *q = qdisc_priv(sch);

	tcm->tcm_handle |= TC_H_MIN(1);
	tcm->tcm_info = q->qdisc->handle;
	return 0;
}

static void red_graft_offload(struct Qdisc *sch,
			      struct Qdisc *new, struct Qdisc *old,
			      struct netlink_ext_ack *extack)
{
	struct tc_red_qopt_offload graft_offload = {
		.handle		= sch->handle,
		.parent		= sch->parent,
		.child_handle	= new->handle,
		.command	= TC_RED_GRAFT,
	};

	qdisc_offload_graft_helper(qdisc_dev(sch), sch, new, old,
				   TC_SETUP_QDISC_RED, &graft_offload, extack);
}

static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
		     struct Qdisc **old, struct netlink_ext_ack *extack)
{
	struct red_sched_data *q = qdisc_priv(sch);

	if (new == NULL)
		new = &noop_qdisc;

	*old = qdisc_replace(sch, new, &q->qdisc);

	red_graft_offload(sch, new, *old, extack);
	return 0;
}

static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct red_sched_data *q = qdisc_priv(sch);

	return q->qdisc;
}

static unsigned long red_find(struct Qdisc *sch, u32 classid)
{
	return 1;
}

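/* RED exposes exactly one pseudo-class (minor 1) wrapping the child
 * qdisc, so the walker visits at most a single element.
 */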
static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	if (!walker->stop) {
		if (walker->count >= walker->skip)
			if (walker->fn(sch, 1, walker) < 0) {
				walker->stop = 1;
				return;
			}
		walker->count++;
	}
}

static const struct Qdisc_class_ops red_class_ops = {
	.graft		= red_graft,
	.leaf		= red_leaf,
	.find		= red_find,
	.walk		= red_walk,
	.dump		= red_dump_class,
};

static struct Qdisc_ops red_qdisc_ops __read_mostly = {
	.id		= "red",
	.priv_size	= sizeof(struct red_sched_data),
	.cl_ops		= &red_class_ops,
	.enqueue	= red_enqueue,
	.dequeue	= red_dequeue,
	.peek		= red_peek,
	.init		= red_init,
	.reset		= red_reset,
	.destroy	= red_destroy,
	.change		= red_change,
	.dump		= red_dump,
	.dump_stats	= red_dump_stats,
	.owner		= THIS_MODULE,
};

static int __init red_module_init(void)
{
	return register_qdisc(&red_qdisc_ops);
}

static void __exit red_module_exit(void)
{
	unregister_qdisc(&red_qdisc_ops);
}

module_init(red_module_init)
module_exit(red_module_exit)

MODULE_LICENSE("GPL");