// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Fair Queue CoDel discipline
 *
 * Copyright (C) 2012,2015 Eric Dumazet <edumazet@google.com>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/codel.h>
#include <net/codel_impl.h>
#include <net/codel_qdisc.h>

/* Fair Queue CoDel.
 *
 * Principles:
 * Packets are classified (by the internal or an external classifier) on flows.
 * This is a stochastic model (as we use a hash, several flows
 * might be hashed to the same slot).
 * Each flow has a CoDel-managed queue.
 * Flows are linked onto two (Round Robin) lists,
 * so that new flows have priority over old ones.
 *
 * For a given flow, packets are not reordered (CoDel uses a FIFO);
 * drops happen at the head only.
 * ECN capability is on by default.
 * Low memory footprint (64 bytes per flow).
 */

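/* Example configuration (iproute2 syntax, see tc-fq_codel(8); shown for
 * illustration, option names not verified against a specific iproute2
 * version):
 *
 *	tc qdisc add dev eth0 root fq_codel limit 10240 flows 1024 \
 *		target 5ms interval 100ms ecn
 *
 * Each option corresponds to one TCA_FQ_CODEL_* netlink attribute
 * handled by fq_codel_change() below.
 */
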
struct fq_codel_flow {
	struct sk_buff	  *head;
	struct sk_buff	  *tail;
	struct list_head  flowchain;
	int		  deficit;
	u32		  dropped; /* number of drops (or ECN marks) on this flow */
	struct codel_vars cvars;
}; /* please try to keep this structure <= 64 bytes */

struct fq_codel_sched_data {
	struct tcf_proto __rcu *filter_list; /* optional external classifier */
	struct tcf_block *block;
	struct fq_codel_flow *flows;	/* Flows table [flows_cnt] */
	u32		*backlogs;	/* backlog table [flows_cnt] */
	u32		flows_cnt;	/* number of flows */
	u32		quantum;	/* psched_mtu(qdisc_dev(sch)); */
	u32		drop_batch_size;
	u32		memory_limit;
	struct codel_params cparams;
	struct codel_stats cstats;
	u32		memory_usage;
	u32		drop_overmemory;
	u32		drop_overlimit;
	u32		new_flow_count;

	struct list_head new_flows;	/* list of new flows */
	struct list_head old_flows;	/* list of old flows */
};

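/* Map the 32-bit skb flow hash onto [0, flows_cnt - 1].
 * reciprocal_scale() computes (u32)(((u64)hash * flows_cnt) >> 32),
 * which avoids a modulo: e.g. with flows_cnt == 1024, a uniform 32-bit
 * hash lands uniformly in 0..1023.
 */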
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
				  struct sk_buff *skb)
{
	return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}

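/* Return a 1-based flow index in [1, flows_cnt], or 0 if the packet must
 * be dropped. skb->priority can select a flow directly; otherwise the
 * attached filter (if any) decides, and the fallback is the stochastic
 * hash above.
 */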
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
				      int *qerr)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tcf_proto *filter;
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->flows_cnt)
		return TC_H_MIN(skb->priority);

	filter = rcu_dereference_bh(q->filter_list);
	if (!filter)
		return fq_codel_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, filter, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->flows_cnt)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/* helper functions: might be changed when/if skb uses a standard list_head */

/* remove one skb from head of slot queue */
static inline struct sk_buff *dequeue_head(struct fq_codel_flow *flow)
{
	struct sk_buff *skb = flow->head;

	flow->head = skb->next;
	skb_mark_not_on_list(skb);
	return skb;
}

/* add skb to flow queue (tail add) */
static inline void flow_queue_add(struct fq_codel_flow *flow,
				  struct sk_buff *skb)
{
	if (flow->head == NULL)
		flow->head = skb;
	else
		flow->tail->next = skb;
	flow->tail = skb;
	skb->next = NULL;
}

static unsigned int fq_codel_drop(struct Qdisc *sch, unsigned int max_packets,
				  struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	unsigned int maxbacklog = 0, idx = 0, i, len;
	struct fq_codel_flow *flow;
	unsigned int threshold;
	unsigned int mem = 0;

	/* Queue is full! Find the fat flow and drop packet(s) from it.
	 * This might sound expensive, but with 1024 flows, we scan
	 * 4KB of memory, and we don't need to handle a complex tree
	 * in fast path (packet queue/enqueue) with many cache misses.
	 * In stress mode, we'll try to drop 64 packets from the flow,
	 * amortizing this linear lookup to one cache line per drop.
	 */
	for (i = 0; i < q->flows_cnt; i++) {
		if (q->backlogs[i] > maxbacklog) {
			maxbacklog = q->backlogs[i];
			idx = i;
		}
	}

	/* Our goal is to drop half of this fat flow backlog */
	threshold = maxbacklog >> 1;

	flow = &q->flows[idx];
	len = 0;
	i = 0;
	do {
		skb = dequeue_head(flow);
		len += qdisc_pkt_len(skb);
		mem += get_codel_cb(skb)->mem_usage;
		__qdisc_drop(skb, to_free);
	} while (++i < max_packets && len < threshold);

	flow->dropped += i;
	q->backlogs[idx] -= len;
	q->memory_usage -= mem;
	sch->qstats.drops += i;
	sch->qstats.backlog -= len;
	sch->q.qlen -= i;
	return idx;
}

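/* Enqueue path: classify to a flow, timestamp the skb for CoDel,
 * tail-add it, and give a previously empty flow a full quantum of
 * deficit on the new_flows list. When sch->limit or the memory limit is
 * exceeded, a batch is dropped from the fattest flow rather than
 * dropping only the incoming packet.
 */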
static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch,
			    struct sk_buff **to_free)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int idx, prev_backlog, prev_qlen;
	struct fq_codel_flow *flow;
	int uninitialized_var(ret);
	unsigned int pkt_len;
	bool memory_limited;

	idx = fq_codel_classify(skb, sch, &ret);
	if (idx == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	idx--;

	codel_set_enqueue_time(skb);
	flow = &q->flows[idx];
	flow_queue_add(flow, skb);
	q->backlogs[idx] += qdisc_pkt_len(skb);
	qdisc_qstats_backlog_inc(sch, skb);

	if (list_empty(&flow->flowchain)) {
		list_add_tail(&flow->flowchain, &q->new_flows);
		q->new_flow_count++;
		flow->deficit = q->quantum;
		flow->dropped = 0;
	}
	get_codel_cb(skb)->mem_usage = skb->truesize;
	q->memory_usage += get_codel_cb(skb)->mem_usage;
	memory_limited = q->memory_usage > q->memory_limit;
	if (++sch->q.qlen <= sch->limit && !memory_limited)
		return NET_XMIT_SUCCESS;

	prev_backlog = sch->qstats.backlog;
	prev_qlen = sch->q.qlen;

	/* save this packet length as it might be dropped by fq_codel_drop() */
	pkt_len = qdisc_pkt_len(skb);
	/* fq_codel_drop() is quite expensive, as it performs a linear search
	 * in q->backlogs[] to find a fat flow.
	 * So instead of dropping a single packet, drop half of its backlog
	 * with a 64 packet limit, to avoid too big a CPU spike here.
	 */
	ret = fq_codel_drop(sch, q->drop_batch_size, to_free);

	prev_qlen -= sch->q.qlen;
	prev_backlog -= sch->qstats.backlog;
	q->drop_overlimit += prev_qlen;
	if (memory_limited)
		q->drop_overmemory += prev_qlen;

	/* As we dropped packet(s), better let upper stack know this.
	 * If we dropped a packet for this flow, return NET_XMIT_CN,
	 * but in this case, our parents won't increase their backlogs.
	 */
	if (ret == idx) {
		qdisc_tree_reduce_backlog(sch, prev_qlen - 1,
					  prev_backlog - pkt_len);
		return NET_XMIT_CN;
	}
	qdisc_tree_reduce_backlog(sch, prev_qlen, prev_backlog);
	return NET_XMIT_SUCCESS;
}

/* This is the specific function called from codel_dequeue()
 * to dequeue a packet from queue. Note: backlog is handled in
 * codel; we don't need to reduce it here.
 */
static struct sk_buff *dequeue_func(struct codel_vars *vars, void *ctx)
{
	struct Qdisc *sch = ctx;
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct fq_codel_flow *flow;
	struct sk_buff *skb = NULL;

	flow = container_of(vars, struct fq_codel_flow, cvars);
	if (flow->head) {
		skb = dequeue_head(flow);
		q->backlogs[flow - q->flows] -= qdisc_pkt_len(skb);
		q->memory_usage -= get_codel_cb(skb)->mem_usage;
		sch->q.qlen--;
		sch->qstats.backlog -= qdisc_pkt_len(skb);
	}
	return skb;
}

static void drop_func(struct sk_buff *skb, void *ctx)
{
	struct Qdisc *sch = ctx;

	kfree_skb(skb);
	qdisc_qstats_drop(sch);
}

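/* Dequeue using deficit round robin over the two flow lists, serving
 * new_flows first. A flow with exhausted deficit gets one more quantum
 * and is rotated to old_flows; with the default quantum of one MTU
 * (1514 bytes on Ethernet), each flow can send roughly one full-size
 * packet per round.
 */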
static struct sk_buff *fq_codel_dequeue(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	struct fq_codel_flow *flow;
	struct list_head *head;
	u32 prev_drop_count, prev_ecn_mark;

begin:
	head = &q->new_flows;
	if (list_empty(head)) {
		head = &q->old_flows;
		if (list_empty(head))
			return NULL;
	}
	flow = list_first_entry(head, struct fq_codel_flow, flowchain);

	if (flow->deficit <= 0) {
		flow->deficit += q->quantum;
		list_move_tail(&flow->flowchain, &q->old_flows);
		goto begin;
	}

	prev_drop_count = q->cstats.drop_count;
	prev_ecn_mark = q->cstats.ecn_mark;

	skb = codel_dequeue(sch, &sch->qstats.backlog, &q->cparams,
			    &flow->cvars, &q->cstats, qdisc_pkt_len,
			    codel_get_enqueue_time, drop_func, dequeue_func);

	flow->dropped += q->cstats.drop_count - prev_drop_count;
	flow->dropped += q->cstats.ecn_mark - prev_ecn_mark;

	if (!skb) {
		/* force a pass through old_flows to prevent starvation */
		if ((head == &q->new_flows) && !list_empty(&q->old_flows))
			list_move_tail(&flow->flowchain, &q->old_flows);
		else
			list_del_init(&flow->flowchain);
		goto begin;
	}
	qdisc_bstats_update(sch, skb);
	flow->deficit -= qdisc_pkt_len(skb);
	/* We can't call qdisc_tree_reduce_backlog() if our qlen is 0,
	 * or HTB crashes. Defer it for next round.
	 */
	if (q->cstats.drop_count && sch->q.qlen) {
		qdisc_tree_reduce_backlog(sch, q->cstats.drop_count,
					  q->cstats.drop_len);
		q->cstats.drop_count = 0;
		q->cstats.drop_len = 0;
	}
	return skb;
}

static void fq_codel_flow_purge(struct fq_codel_flow *flow)
{
	rtnl_kfree_skbs(flow->head, flow->tail);
	flow->head = NULL;
}

static void fq_codel_reset(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;

	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	for (i = 0; i < q->flows_cnt; i++) {
		struct fq_codel_flow *flow = q->flows + i;

		fq_codel_flow_purge(flow);
		INIT_LIST_HEAD(&flow->flowchain);
		codel_vars_init(&flow->cvars);
	}
	memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
	sch->q.qlen = 0;
	sch->qstats.backlog = 0;
	q->memory_usage = 0;
}

static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
	[TCA_FQ_CODEL_TARGET]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_LIMIT]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_INTERVAL]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_ECN]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_FLOWS]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_QUANTUM]	= { .type = NLA_U32 },
	[TCA_FQ_CODEL_CE_THRESHOLD] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_DROP_BATCH_SIZE] = { .type = NLA_U32 },
	[TCA_FQ_CODEL_MEMORY_LIMIT] = { .type = NLA_U32 },
};

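/* Note on units: TCA_FQ_CODEL_TARGET, _INTERVAL and _CE_THRESHOLD are
 * received in microseconds and stored as codel_time_t, i.e. nanoseconds
 * shifted right by CODEL_SHIFT (10, per net/codel.h). For example, a
 * 5000 us target is stored as (5000 * 1000) >> 10 = 4882.
 */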
static int fq_codel_change(struct Qdisc *sch, struct nlattr *opt,
			   struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_FQ_CODEL_MAX + 1];
	int err;

	if (!opt)
		return -EINVAL;

	err = nla_parse_nested_deprecated(tb, TCA_FQ_CODEL_MAX, opt,
					  fq_codel_policy, NULL);
	if (err < 0)
		return err;
	if (tb[TCA_FQ_CODEL_FLOWS]) {
		if (q->flows)
			return -EINVAL;
		q->flows_cnt = nla_get_u32(tb[TCA_FQ_CODEL_FLOWS]);
		if (!q->flows_cnt ||
		    q->flows_cnt > 65536)
			return -EINVAL;
	}
	sch_tree_lock(sch);

	if (tb[TCA_FQ_CODEL_TARGET]) {
		u64 target = nla_get_u32(tb[TCA_FQ_CODEL_TARGET]);

		q->cparams.target = (target * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_CE_THRESHOLD]) {
		u64 val = nla_get_u32(tb[TCA_FQ_CODEL_CE_THRESHOLD]);

		q->cparams.ce_threshold = (val * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_INTERVAL]) {
		u64 interval = nla_get_u32(tb[TCA_FQ_CODEL_INTERVAL]);

		q->cparams.interval = (interval * NSEC_PER_USEC) >> CODEL_SHIFT;
	}

	if (tb[TCA_FQ_CODEL_LIMIT])
		sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);

	if (tb[TCA_FQ_CODEL_ECN])
		q->cparams.ecn = !!nla_get_u32(tb[TCA_FQ_CODEL_ECN]);

	if (tb[TCA_FQ_CODEL_QUANTUM])
		q->quantum = max(256U, nla_get_u32(tb[TCA_FQ_CODEL_QUANTUM]));

	if (tb[TCA_FQ_CODEL_DROP_BATCH_SIZE])
		q->drop_batch_size = max(1U, nla_get_u32(tb[TCA_FQ_CODEL_DROP_BATCH_SIZE]));

	if (tb[TCA_FQ_CODEL_MEMORY_LIMIT])
		q->memory_limit = min(1U << 31, nla_get_u32(tb[TCA_FQ_CODEL_MEMORY_LIMIT]));

	while (sch->q.qlen > sch->limit ||
	       q->memory_usage > q->memory_limit) {
		struct sk_buff *skb = fq_codel_dequeue(sch);

		q->cstats.drop_len += qdisc_pkt_len(skb);
		rtnl_kfree_skbs(skb, skb);
		q->cstats.drop_count++;
	}
	qdisc_tree_reduce_backlog(sch, q->cstats.drop_count, q->cstats.drop_len);
	q->cstats.drop_count = 0;
	q->cstats.drop_len = 0;

	sch_tree_unlock(sch);
	return 0;
}

static void fq_codel_destroy(struct Qdisc *sch)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	kvfree(q->backlogs);
	kvfree(q->flows);
}

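/* Defaults applied below: 10240 packet limit, 1024 flows, 32 MB memory
 * limit, drop batches of 64 packets, quantum of one device MTU, ECN on.
 */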
static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
			 struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	sch->limit = 10*1024;
	q->flows_cnt = 1024;
	q->memory_limit = 32 << 20; /* 32 MBytes */
	q->drop_batch_size = 64;
	q->quantum = psched_mtu(qdisc_dev(sch));
	INIT_LIST_HEAD(&q->new_flows);
	INIT_LIST_HEAD(&q->old_flows);
	codel_params_init(&q->cparams);
	codel_stats_init(&q->cstats);
	q->cparams.ecn = true;
	q->cparams.mtu = psched_mtu(qdisc_dev(sch));

	if (opt) {
		err = fq_codel_change(sch, opt, extack);
		if (err)
			goto init_failure;
	}

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		goto init_failure;

	if (!q->flows) {
		q->flows = kvcalloc(q->flows_cnt,
				    sizeof(struct fq_codel_flow),
				    GFP_KERNEL);
		if (!q->flows) {
			err = -ENOMEM;
			goto init_failure;
		}
		q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
		if (!q->backlogs) {
			err = -ENOMEM;
			goto alloc_failure;
		}
		for (i = 0; i < q->flows_cnt; i++) {
			struct fq_codel_flow *flow = q->flows + i;

			INIT_LIST_HEAD(&flow->flowchain);
			codel_vars_init(&flow->cvars);
		}
	}
	if (sch->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	q->flows_cnt = 0;
	return err;
}

static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts;

	opts = nla_nest_start_noflag(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	if (nla_put_u32(skb, TCA_FQ_CODEL_TARGET,
			codel_time_to_us(q->cparams.target)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_LIMIT,
			sch->limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_INTERVAL,
			codel_time_to_us(q->cparams.interval)) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_ECN,
			q->cparams.ecn) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_QUANTUM,
			q->quantum) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_DROP_BATCH_SIZE,
			q->drop_batch_size) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_MEMORY_LIMIT,
			q->memory_limit) ||
	    nla_put_u32(skb, TCA_FQ_CODEL_FLOWS,
			q->flows_cnt))
		goto nla_put_failure;

	if (q->cparams.ce_threshold != CODEL_DISABLED_THRESHOLD &&
	    nla_put_u32(skb, TCA_FQ_CODEL_CE_THRESHOLD,
			codel_time_to_us(q->cparams.ce_threshold)))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	return -1;
}

static int fq_codel_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	struct tc_fq_codel_xstats st = {
		.type			= TCA_FQ_CODEL_XSTATS_QDISC,
	};
	struct list_head *pos;

	st.qdisc_stats.maxpacket = q->cstats.maxpacket;
	st.qdisc_stats.drop_overlimit = q->drop_overlimit;
	st.qdisc_stats.ecn_mark = q->cstats.ecn_mark;
	st.qdisc_stats.new_flow_count = q->new_flow_count;
	st.qdisc_stats.ce_mark = q->cstats.ce_mark;
	st.qdisc_stats.memory_usage = q->memory_usage;
	st.qdisc_stats.drop_overmemory = q->drop_overmemory;

	sch_tree_lock(sch);
	list_for_each(pos, &q->new_flows)
		st.qdisc_stats.new_flows_len++;

	list_for_each(pos, &q->old_flows)
		st.qdisc_stats.old_flows_len++;
	sch_tree_unlock(sch);

	return gnet_stats_copy_app(d, &st, sizeof(st));
}

static struct Qdisc *fq_codel_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long fq_codel_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long fq_codel_bind(struct Qdisc *sch, unsigned long parent,
				   u32 classid)
{
	/* we cannot bypass queue discipline anymore */
	sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static void fq_codel_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *fq_codel_tcf_block(struct Qdisc *sch, unsigned long cl,
					    struct netlink_ext_ack *extack)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int fq_codel_dump_class(struct Qdisc *sch, unsigned long cl,
			       struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

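/* Per-flow (class) statistics: class ids map to flow index + 1, matching
 * fq_codel_classify(). qs.qlen is computed by walking the flow's skb
 * list under the qdisc tree lock, exact but O(queue length).
 */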
static int fq_codel_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				     struct gnet_dump *d)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	u32 idx = cl - 1;
	struct gnet_stats_queue qs = { 0 };
	struct tc_fq_codel_xstats xstats;

	if (idx < q->flows_cnt) {
		const struct fq_codel_flow *flow = &q->flows[idx];
		const struct sk_buff *skb;

		memset(&xstats, 0, sizeof(xstats));
		xstats.type = TCA_FQ_CODEL_XSTATS_CLASS;
		xstats.class_stats.deficit = flow->deficit;
		xstats.class_stats.ldelay =
			codel_time_to_us(flow->cvars.ldelay);
		xstats.class_stats.count = flow->cvars.count;
		xstats.class_stats.lastcount = flow->cvars.lastcount;
		xstats.class_stats.dropping = flow->cvars.dropping;
		if (flow->cvars.dropping) {
			codel_tdiff_t delta = flow->cvars.drop_next -
					      codel_get_time();

			xstats.class_stats.drop_next = (delta >= 0) ?
				codel_time_to_us(delta) :
				-codel_time_to_us(-delta);
		}
		if (flow->head) {
			sch_tree_lock(sch);
			skb = flow->head;
			while (skb) {
				qs.qlen++;
				skb = skb->next;
			}
			sch_tree_unlock(sch);
		}
		qs.backlog = q->backlogs[idx];
		qs.drops = flow->dropped;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	if (idx < q->flows_cnt)
		return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
	return 0;
}

static void fq_codel_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct fq_codel_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->flows_cnt; i++) {
		if (list_empty(&q->flows[i].flowchain) ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}

static const struct Qdisc_class_ops fq_codel_class_ops = {
	.leaf		=	fq_codel_leaf,
	.find		=	fq_codel_find,
	.tcf_block	=	fq_codel_tcf_block,
	.bind_tcf	=	fq_codel_bind,
	.unbind_tcf	=	fq_codel_unbind,
	.dump		=	fq_codel_dump_class,
	.dump_stats	=	fq_codel_dump_class_stats,
	.walk		=	fq_codel_walk,
};

static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
	.cl_ops		=	&fq_codel_class_ops,
	.id		=	"fq_codel",
	.priv_size	=	sizeof(struct fq_codel_sched_data),
	.enqueue	=	fq_codel_enqueue,
	.dequeue	=	fq_codel_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	fq_codel_init,
	.reset		=	fq_codel_reset,
	.destroy	=	fq_codel_destroy,
	.change		=	fq_codel_change,
	.dump		=	fq_codel_dump,
	.dump_stats	=	fq_codel_dump_stats,
	.owner		=	THIS_MODULE,
};

static int __init fq_codel_module_init(void)
{
	return register_qdisc(&fq_codel_qdisc_ops);
}

static void __exit fq_codel_module_exit(void)
{
	unregister_qdisc(&fq_codel_qdisc_ops);
}

module_init(fq_codel_module_init)
module_exit(fq_codel_module_exit)
MODULE_AUTHOR("Eric Dumazet");
MODULE_LICENSE("GPL");