 1// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2
3/* COMMON Applications Kept Enhanced (CAKE) discipline
4 *
5 * Copyright (C) 2014-2018 Jonathan Morton <chromatix99@gmail.com>
6 * Copyright (C) 2015-2018 Toke Høiland-Jørgensen <toke@toke.dk>
7 * Copyright (C) 2014-2018 Dave Täht <dave.taht@gmail.com>
8 * Copyright (C) 2015-2018 Sebastian Moeller <moeller0@gmx.de>
9 * (C) 2015-2018 Kevin Darbyshire-Bryant <kevin@darbyshire-bryant.me.uk>
10 * Copyright (C) 2017-2018 Ryan Mounce <ryan@mounce.com.au>
11 *
12 * The CAKE Principles:
13 * (or, how to have your cake and eat it too)
14 *
15 * This is a combination of several shaping, AQM and FQ techniques into one
16 * easy-to-use package:
17 *
18 * - An overall bandwidth shaper, to move the bottleneck away from dumb CPE
19 * equipment and bloated MACs. This operates in deficit mode (as in sch_fq),
20 * eliminating the need for any sort of burst parameter (eg. token bucket
21 * depth). Burst support is limited to that necessary to overcome scheduling
22 * latency.
23 *
24 * - A Diffserv-aware priority queue, giving more priority to certain classes,
25 * up to a specified fraction of bandwidth. Above that bandwidth threshold,
26 * the priority is reduced to avoid starving other tins.
27 *
28 * - Each priority tin has a separate Flow Queue system, to isolate traffic
29 * flows from each other. This prevents a burst on one flow from increasing
30 * the delay to another. Flows are distributed to queues using a
31 * set-associative hash function.
32 *
33 * - Each queue is actively managed by Cobalt, which is a combination of the
34 * Codel and Blue AQM algorithms. This serves flows fairly, and signals
35 * congestion early via ECN (if available) and/or packet drops, to keep
36 * latency low. The codel parameters are auto-tuned based on the bandwidth
37 * setting, as is necessary at low bandwidths.
38 *
39 * The configuration parameters are kept deliberately simple for ease of use.
40 * Everything has sane defaults. Complete generality of configuration is *not*
41 * a goal.
42 *
43 * The priority queue operates according to a weighted DRR scheme, combined with
44 * a bandwidth tracker which reuses the shaper logic to detect which side of the
 45 * bandwidth sharing threshold the tin is operating on. This determines whether a
46 * priority-based weight (high) or a bandwidth-based weight (low) is used for
47 * that tin in the current pass.
48 *
49 * This qdisc was inspired by Eric Dumazet's fq_codel code, which he kindly
50 * granted us permission to leverage.
51 */
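/* Example configuration (illustrative, assuming a matching iproute2 tc):
 * a typical deployment only sets the shaper rate to just below the link
 * rate and leaves everything else at its defaults, e.g.
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 19500kbit
 *
 * The exact keyword set available depends on the iproute2 version in use.
 */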
52
53#include <linux/module.h>
54#include <linux/types.h>
55#include <linux/kernel.h>
56#include <linux/jiffies.h>
57#include <linux/string.h>
58#include <linux/in.h>
59#include <linux/errno.h>
60#include <linux/init.h>
61#include <linux/skbuff.h>
62#include <linux/jhash.h>
63#include <linux/slab.h>
64#include <linux/vmalloc.h>
65#include <linux/reciprocal_div.h>
66#include <net/netlink.h>
67#include <linux/version.h>
68#include <linux/if_vlan.h>
69#include <net/pkt_sched.h>
70#include <net/pkt_cls.h>
71#include <net/tcp.h>
72#include <net/flow_dissector.h>
73
 74#if IS_ENABLED(CONFIG_NF_CONNTRACK)
75#include <net/netfilter/nf_conntrack_core.h>
76#endif
77
 78#define CAKE_SET_WAYS (8)
79#define CAKE_MAX_TINS (8)
80#define CAKE_QUEUES (1024)
81#define CAKE_FLOW_MASK 63
82#define CAKE_FLOW_NAT_FLAG 64
83
84/* struct cobalt_params - contains codel and blue parameters
85 * @interval: codel initial drop rate
86 * @target: maximum persistent sojourn time & blue update rate
87 * @mtu_time: serialisation delay of maximum-size packet
88 * @p_inc: increment of blue drop probability (0.32 fxp)
89 * @p_dec: decrement of blue drop probability (0.32 fxp)
90 */
91struct cobalt_params {
92 u64 interval;
93 u64 target;
94 u64 mtu_time;
95 u32 p_inc;
96 u32 p_dec;
97};
98
99/* struct cobalt_vars - contains codel and blue variables
100 * @count: codel dropping frequency
101 * @rec_inv_sqrt: reciprocal value of sqrt(count) >> 1
102 * @drop_next: time to drop next packet, or when we dropped last
103 * @blue_timer: Blue time to next drop
104 * @p_drop: BLUE drop probability (0.32 fxp)
105 * @dropping: set if in dropping state
106 * @ecn_marked: set if marked
107 */
108struct cobalt_vars {
109 u32 count;
110 u32 rec_inv_sqrt;
111 ktime_t drop_next;
112 ktime_t blue_timer;
113 u32 p_drop;
114 bool dropping;
115 bool ecn_marked;
116};
117
118enum {
119 CAKE_SET_NONE = 0,
120 CAKE_SET_SPARSE,
121 CAKE_SET_SPARSE_WAIT, /* counted in SPARSE, actually in BULK */
122 CAKE_SET_BULK,
123 CAKE_SET_DECAYING
124};
125
126struct cake_flow {
127 /* this stuff is all needed per-flow at dequeue time */
128 struct sk_buff *head;
129 struct sk_buff *tail;
130 struct list_head flowchain;
131 s32 deficit;
132 u32 dropped;
133 struct cobalt_vars cvars;
134 u16 srchost; /* index into cake_host table */
135 u16 dsthost;
136 u8 set;
137}; /* please try to keep this structure <= 64 bytes */
138
139struct cake_host {
140 u32 srchost_tag;
141 u32 dsthost_tag;
142 u16 srchost_refcnt;
143 u16 dsthost_refcnt;
144};
145
146struct cake_heap_entry {
147 u16 t:3, b:10;
148};
149
150struct cake_tin_data {
151 struct cake_flow flows[CAKE_QUEUES];
152 u32 backlogs[CAKE_QUEUES];
153 u32 tags[CAKE_QUEUES]; /* for set association */
154 u16 overflow_idx[CAKE_QUEUES];
155 struct cake_host hosts[CAKE_QUEUES]; /* for triple isolation */
156 u16 flow_quantum;
157
158 struct cobalt_params cparams;
159 u32 drop_overlimit;
160 u16 bulk_flow_count;
161 u16 sparse_flow_count;
162 u16 decaying_flow_count;
163 u16 unresponsive_flow_count;
164
165 u32 max_skblen;
166
167 struct list_head new_flows;
168 struct list_head old_flows;
169 struct list_head decaying_flows;
170
171 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
172 ktime_t time_next_packet;
173 u64 tin_rate_ns;
174 u64 tin_rate_bps;
175 u16 tin_rate_shft;
176
177 u16 tin_quantum_prio;
178 u16 tin_quantum_band;
179 s32 tin_deficit;
180 u32 tin_backlog;
181 u32 tin_dropped;
182 u32 tin_ecn_mark;
183
184 u32 packets;
185 u64 bytes;
186
187 u32 ack_drops;
188
189 /* moving averages */
190 u64 avge_delay;
191 u64 peak_delay;
192 u64 base_delay;
193
194 /* hash function stats */
195 u32 way_directs;
196 u32 way_hits;
197 u32 way_misses;
198 u32 way_collisions;
199}; /* number of tins is small, so size of this struct doesn't matter much */
200
201struct cake_sched_data {
202 struct tcf_proto __rcu *filter_list; /* optional external classifier */
203 struct tcf_block *block;
204 struct cake_tin_data *tins;
205
206 struct cake_heap_entry overflow_heap[CAKE_QUEUES * CAKE_MAX_TINS];
207 u16 overflow_timeout;
208
209 u16 tin_cnt;
210 u8 tin_mode;
211 u8 flow_mode;
212 u8 ack_filter;
213 u8 atm_mode;
214
215 /* time_next = time_this + ((len * rate_ns) >> rate_shft) */
216 u16 rate_shft;
217 ktime_t time_next_packet;
218 ktime_t failsafe_next_packet;
219 u64 rate_ns;
220 u64 rate_bps;
221 u16 rate_flags;
222 s16 rate_overhead;
223 u16 rate_mpu;
224 u64 interval;
225 u64 target;
226
227 /* resource tracking */
228 u32 buffer_used;
229 u32 buffer_max_used;
230 u32 buffer_limit;
231 u32 buffer_config_limit;
232
233 /* indices for dequeue */
234 u16 cur_tin;
235 u16 cur_flow;
236
237 struct qdisc_watchdog watchdog;
238 const u8 *tin_index;
239 const u8 *tin_order;
240
241 /* bandwidth capacity estimate */
242 ktime_t last_packet_time;
243 ktime_t avg_window_begin;
244 u64 avg_packet_interval;
245 u64 avg_window_bytes;
246 u64 avg_peak_bandwidth;
247 ktime_t last_reconfig_time;
248
249 /* packet length stats */
250 u32 avg_netoff;
251 u16 max_netlen;
252 u16 max_adjlen;
253 u16 min_netlen;
254 u16 min_adjlen;
255};
256
257enum {
258 CAKE_FLAG_OVERHEAD = BIT(0),
259 CAKE_FLAG_AUTORATE_INGRESS = BIT(1),
260 CAKE_FLAG_INGRESS = BIT(2),
261 CAKE_FLAG_WASH = BIT(3),
262 CAKE_FLAG_SPLIT_GSO = BIT(4)
263};
264
265/* COBALT operates the Codel and BLUE algorithms in parallel, in order to
266 * obtain the best features of each. Codel is excellent on flows which
267 * respond to congestion signals in a TCP-like way. BLUE is more effective on
268 * unresponsive flows.
269 */
270
271struct cobalt_skb_cb {
272 ktime_t enqueue_time;
273};
274
275static u64 us_to_ns(u64 us)
276{
277 return us * NSEC_PER_USEC;
278}
279
280static struct cobalt_skb_cb *get_cobalt_cb(const struct sk_buff *skb)
281{
282 qdisc_cb_private_validate(skb, sizeof(struct cobalt_skb_cb));
283 return (struct cobalt_skb_cb *)qdisc_skb_cb(skb)->data;
284}
285
286static ktime_t cobalt_get_enqueue_time(const struct sk_buff *skb)
287{
288 return get_cobalt_cb(skb)->enqueue_time;
289}
290
291static void cobalt_set_enqueue_time(struct sk_buff *skb,
292 ktime_t now)
293{
294 get_cobalt_cb(skb)->enqueue_time = now;
295}
296
297static u16 quantum_div[CAKE_QUEUES + 1] = {0};
298
299#define REC_INV_SQRT_CACHE (16)
300static u32 cobalt_rec_inv_sqrt_cache[REC_INV_SQRT_CACHE] = {0};
301
302/* http://en.wikipedia.org/wiki/Methods_of_computing_square_roots
303 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
304 *
305 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
306 */
307
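/* A worked reading of the fixed-point shifts in cobalt_newton_step() below:
 * invsqrt2 holds invsqrt^2 in Q0.32, and val starts as (3 - count * invsqrt^2)
 * in Q.32. The ">>= 2" protects the following 64-bit multiply from overflow
 * and is compensated by the "- 2" in the final shift; the extra "+ 1" supplies
 * the division by two, so the result is (invsqrt / 2) * (3 - count * invsqrt^2)
 * re-encoded as Q0.32.
 */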
308static void cobalt_newton_step(struct cobalt_vars *vars)
309{
310 u32 invsqrt, invsqrt2;
311 u64 val;
312
313 invsqrt = vars->rec_inv_sqrt;
314 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
315 val = (3LL << 32) - ((u64)vars->count * invsqrt2);
316
317 val >>= 2; /* avoid overflow in following multiply */
318 val = (val * invsqrt) >> (32 - 2 + 1);
319
320 vars->rec_inv_sqrt = val;
321}
322
323static void cobalt_invsqrt(struct cobalt_vars *vars)
324{
325 if (vars->count < REC_INV_SQRT_CACHE)
326 vars->rec_inv_sqrt = cobalt_rec_inv_sqrt_cache[vars->count];
327 else
328 cobalt_newton_step(vars);
329}
330
331/* There is a big difference in timing between the accurate values placed in
332 * the cache and the approximations given by a single Newton step for small
333 * count values, particularly when stepping from count 1 to 2 or vice versa.
334 * Above 16, a single Newton step gives sufficient accuracy in either
335 * direction, given the precision stored.
336 *
337 * The magnitude of the error when stepping up to count 2 is such as to give
338 * the value that *should* have been produced at count 4.
339 */
340
341static void cobalt_cache_init(void)
342{
343 struct cobalt_vars v;
344
345 memset(&v, 0, sizeof(v));
346 v.rec_inv_sqrt = ~0U;
347 cobalt_rec_inv_sqrt_cache[0] = v.rec_inv_sqrt;
348
349 for (v.count = 1; v.count < REC_INV_SQRT_CACHE; v.count++) {
350 cobalt_newton_step(&v);
351 cobalt_newton_step(&v);
352 cobalt_newton_step(&v);
353 cobalt_newton_step(&v);
354
355 cobalt_rec_inv_sqrt_cache[v.count] = v.rec_inv_sqrt;
356 }
357}
358
359static void cobalt_vars_init(struct cobalt_vars *vars)
360{
361 memset(vars, 0, sizeof(*vars));
362
363 if (!cobalt_rec_inv_sqrt_cache[0]) {
364 cobalt_cache_init();
365 cobalt_rec_inv_sqrt_cache[0] = ~0;
366 }
367}
368
369/* CoDel control_law is t + interval/sqrt(count)
370 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
371 * both sqrt() and divide operation.
372 */
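/* Concretely, reciprocal_scale(interval, rec_inv_sqrt) evaluates
 * (interval * rec_inv_sqrt) >> 32; with rec_inv_sqrt ~= 2^32 / sqrt(count),
 * that is interval / sqrt(count), which is added to t below.
 */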
373static ktime_t cobalt_control(ktime_t t,
374 u64 interval,
375 u32 rec_inv_sqrt)
376{
377 return ktime_add_ns(t, reciprocal_scale(interval,
378 rec_inv_sqrt));
379}
380
381/* Call this when a packet had to be dropped due to queue overflow. Returns
382 * true if the BLUE state was quiescent before but active after this call.
383 */
384static bool cobalt_queue_full(struct cobalt_vars *vars,
385 struct cobalt_params *p,
386 ktime_t now)
387{
388 bool up = false;
389
390 if (ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
391 up = !vars->p_drop;
392 vars->p_drop += p->p_inc;
393 if (vars->p_drop < p->p_inc)
394 vars->p_drop = ~0;
395 vars->blue_timer = now;
396 }
397 vars->dropping = true;
398 vars->drop_next = now;
399 if (!vars->count)
400 vars->count = 1;
401
402 return up;
403}
404
405/* Call this when the queue was serviced but turned out to be empty. Returns
406 * true if the BLUE state was active before but quiescent after this call.
407 */
408static bool cobalt_queue_empty(struct cobalt_vars *vars,
409 struct cobalt_params *p,
410 ktime_t now)
411{
412 bool down = false;
413
414 if (vars->p_drop &&
415 ktime_to_ns(ktime_sub(now, vars->blue_timer)) > p->target) {
416 if (vars->p_drop < p->p_dec)
417 vars->p_drop = 0;
418 else
419 vars->p_drop -= p->p_dec;
420 vars->blue_timer = now;
421 down = !vars->p_drop;
422 }
423 vars->dropping = false;
424
425 if (vars->count && ktime_to_ns(ktime_sub(now, vars->drop_next)) >= 0) {
426 vars->count--;
427 cobalt_invsqrt(vars);
428 vars->drop_next = cobalt_control(vars->drop_next,
429 p->interval,
430 vars->rec_inv_sqrt);
431 }
432
433 return down;
434}
435
436/* Call this with a freshly dequeued packet for possible congestion marking.
437 * Returns true as an instruction to drop the packet, false for delivery.
438 */
439static bool cobalt_should_drop(struct cobalt_vars *vars,
440 struct cobalt_params *p,
441 ktime_t now,
 442 struct sk_buff *skb,
443 u32 bulk_flows)
 444{
445 bool next_due, over_target, drop = false;
446 ktime_t schedule;
447 u64 sojourn;
448
449/* The 'schedule' variable records, in its sign, whether 'now' is before or
450 * after 'drop_next'. This allows 'drop_next' to be updated before the next
451 * scheduling decision is actually branched, without destroying that
452 * information. Similarly, the first 'schedule' value calculated is preserved
453 * in the boolean 'next_due'.
454 *
455 * As for 'drop_next', we take advantage of the fact that 'interval' is both
456 * the delay between first exceeding 'target' and the first signalling event,
457 * *and* the scaling factor for the signalling frequency. It's therefore very
458 * natural to use a single mechanism for both purposes, and eliminates a
459 * significant amount of reference Codel's spaghetti code. To help with this,
460 * both the '0' and '1' entries in the invsqrt cache are 0xFFFFFFFF, as close
461 * as possible to 1.0 in fixed-point.
462 */
463
464 sojourn = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
465 schedule = ktime_sub(now, vars->drop_next);
466 over_target = sojourn > p->target &&
 467 sojourn > p->mtu_time * bulk_flows * 2 &&
 468 sojourn > p->mtu_time * 4;
469 next_due = vars->count && ktime_to_ns(schedule) >= 0;
470
471 vars->ecn_marked = false;
472
473 if (over_target) {
474 if (!vars->dropping) {
475 vars->dropping = true;
476 vars->drop_next = cobalt_control(now,
477 p->interval,
478 vars->rec_inv_sqrt);
479 }
480 if (!vars->count)
481 vars->count = 1;
482 } else if (vars->dropping) {
483 vars->dropping = false;
484 }
485
486 if (next_due && vars->dropping) {
487 /* Use ECN mark if possible, otherwise drop */
488 drop = !(vars->ecn_marked = INET_ECN_set_ce(skb));
489
490 vars->count++;
491 if (!vars->count)
492 vars->count--;
493 cobalt_invsqrt(vars);
494 vars->drop_next = cobalt_control(vars->drop_next,
495 p->interval,
496 vars->rec_inv_sqrt);
497 schedule = ktime_sub(now, vars->drop_next);
498 } else {
499 while (next_due) {
500 vars->count--;
501 cobalt_invsqrt(vars);
502 vars->drop_next = cobalt_control(vars->drop_next,
503 p->interval,
504 vars->rec_inv_sqrt);
505 schedule = ktime_sub(now, vars->drop_next);
506 next_due = vars->count && ktime_to_ns(schedule) >= 0;
507 }
508 }
509
510 /* Simple BLUE implementation. Lack of ECN is deliberate. */
511 if (vars->p_drop)
512 drop |= (prandom_u32() < vars->p_drop);
513
514 /* Overload the drop_next field as an activity timeout */
515 if (!vars->count)
516 vars->drop_next = ktime_add_ns(now, p->interval);
517 else if (ktime_to_ns(schedule) > 0 && !drop)
518 vars->drop_next = now;
519
520 return drop;
521}
522
 523static void cake_update_flowkeys(struct flow_keys *keys,
524 const struct sk_buff *skb)
525{
526#if IS_ENABLED(CONFIG_NF_CONNTRACK)
527 struct nf_conntrack_tuple tuple = {};
528 bool rev = !skb->_nfct;
529
530 if (tc_skb_protocol(skb) != htons(ETH_P_IP))
531 return;
532
533 if (!nf_ct_get_tuple_skb(&tuple, skb))
534 return;
535
536 keys->addrs.v4addrs.src = rev ? tuple.dst.u3.ip : tuple.src.u3.ip;
537 keys->addrs.v4addrs.dst = rev ? tuple.src.u3.ip : tuple.dst.u3.ip;
538
539 if (keys->ports.ports) {
540 keys->ports.src = rev ? tuple.dst.u.all : tuple.src.u.all;
541 keys->ports.dst = rev ? tuple.src.u.all : tuple.dst.u.all;
542 }
543#endif
544}
545
 546/* Cake has several subtle multiple bit settings. In these cases you
547 * would be matching triple isolate mode as well.
548 */
549
550static bool cake_dsrc(int flow_mode)
551{
552 return (flow_mode & CAKE_FLOW_DUAL_SRC) == CAKE_FLOW_DUAL_SRC;
553}
554
555static bool cake_ddst(int flow_mode)
556{
557 return (flow_mode & CAKE_FLOW_DUAL_DST) == CAKE_FLOW_DUAL_DST;
558}
559
560static u32 cake_hash(struct cake_tin_data *q, const struct sk_buff *skb,
561 int flow_mode)
562{
563 u32 flow_hash = 0, srchost_hash, dsthost_hash;
564 u16 reduced_hash, srchost_idx, dsthost_idx;
565 struct flow_keys keys, host_keys;
566
567 if (unlikely(flow_mode == CAKE_FLOW_NONE))
568 return 0;
569
570 skb_flow_dissect_flow_keys(skb, &keys,
571 FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL);
572
 573 if (flow_mode & CAKE_FLOW_NAT_FLAG)
574 cake_update_flowkeys(&keys, skb);
575
 576 /* flow_hash_from_keys() sorts the addresses by value, so we have
577 * to preserve their order in a separate data structure to treat
578 * src and dst host addresses as independently selectable.
579 */
580 host_keys = keys;
581 host_keys.ports.ports = 0;
582 host_keys.basic.ip_proto = 0;
583 host_keys.keyid.keyid = 0;
584 host_keys.tags.flow_label = 0;
585
586 switch (host_keys.control.addr_type) {
587 case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
588 host_keys.addrs.v4addrs.src = 0;
589 dsthost_hash = flow_hash_from_keys(&host_keys);
590 host_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
591 host_keys.addrs.v4addrs.dst = 0;
592 srchost_hash = flow_hash_from_keys(&host_keys);
593 break;
594
595 case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
596 memset(&host_keys.addrs.v6addrs.src, 0,
597 sizeof(host_keys.addrs.v6addrs.src));
598 dsthost_hash = flow_hash_from_keys(&host_keys);
599 host_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
600 memset(&host_keys.addrs.v6addrs.dst, 0,
601 sizeof(host_keys.addrs.v6addrs.dst));
602 srchost_hash = flow_hash_from_keys(&host_keys);
603 break;
604
605 default:
606 dsthost_hash = 0;
607 srchost_hash = 0;
608 }
609
610 /* This *must* be after the above switch, since as a
611 * side-effect it sorts the src and dst addresses.
612 */
613 if (flow_mode & CAKE_FLOW_FLOWS)
614 flow_hash = flow_hash_from_keys(&keys);
615
616 if (!(flow_mode & CAKE_FLOW_FLOWS)) {
617 if (flow_mode & CAKE_FLOW_SRC_IP)
618 flow_hash ^= srchost_hash;
619
620 if (flow_mode & CAKE_FLOW_DST_IP)
621 flow_hash ^= dsthost_hash;
622 }
623
624 reduced_hash = flow_hash % CAKE_QUEUES;
625
626 /* set-associative hashing */
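 /* The CAKE_QUEUES (1024) queues are grouped into sets of CAKE_SET_WAYS (8)
  * consecutive entries; outer_hash selects the set and the ways within it
  * are probed linearly below.
  */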
627 /* fast path if no hash collision (direct lookup succeeds) */
628 if (likely(q->tags[reduced_hash] == flow_hash &&
629 q->flows[reduced_hash].set)) {
630 q->way_directs++;
631 } else {
632 u32 inner_hash = reduced_hash % CAKE_SET_WAYS;
633 u32 outer_hash = reduced_hash - inner_hash;
634 bool allocate_src = false;
635 bool allocate_dst = false;
636 u32 i, k;
637
638 /* check if any active queue in the set is reserved for
639 * this flow.
640 */
641 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
642 i++, k = (k + 1) % CAKE_SET_WAYS) {
643 if (q->tags[outer_hash + k] == flow_hash) {
644 if (i)
645 q->way_hits++;
646
647 if (!q->flows[outer_hash + k].set) {
648 /* need to increment host refcnts */
649 allocate_src = cake_dsrc(flow_mode);
650 allocate_dst = cake_ddst(flow_mode);
651 }
652
653 goto found;
654 }
655 }
656
657 /* no queue is reserved for this flow, look for an
658 * empty one.
659 */
660 for (i = 0; i < CAKE_SET_WAYS;
661 i++, k = (k + 1) % CAKE_SET_WAYS) {
662 if (!q->flows[outer_hash + k].set) {
663 q->way_misses++;
664 allocate_src = cake_dsrc(flow_mode);
665 allocate_dst = cake_ddst(flow_mode);
666 goto found;
667 }
668 }
669
670 /* With no empty queues, default to the original
671 * queue, accept the collision, update the host tags.
672 */
673 q->way_collisions++;
674 q->hosts[q->flows[reduced_hash].srchost].srchost_refcnt--;
675 q->hosts[q->flows[reduced_hash].dsthost].dsthost_refcnt--;
676 allocate_src = cake_dsrc(flow_mode);
677 allocate_dst = cake_ddst(flow_mode);
678found:
679 /* reserve queue for future packets in same flow */
680 reduced_hash = outer_hash + k;
681 q->tags[reduced_hash] = flow_hash;
682
683 if (allocate_src) {
684 srchost_idx = srchost_hash % CAKE_QUEUES;
685 inner_hash = srchost_idx % CAKE_SET_WAYS;
686 outer_hash = srchost_idx - inner_hash;
687 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
688 i++, k = (k + 1) % CAKE_SET_WAYS) {
689 if (q->hosts[outer_hash + k].srchost_tag ==
690 srchost_hash)
691 goto found_src;
692 }
693 for (i = 0; i < CAKE_SET_WAYS;
694 i++, k = (k + 1) % CAKE_SET_WAYS) {
695 if (!q->hosts[outer_hash + k].srchost_refcnt)
696 break;
697 }
698 q->hosts[outer_hash + k].srchost_tag = srchost_hash;
699found_src:
700 srchost_idx = outer_hash + k;
701 q->hosts[srchost_idx].srchost_refcnt++;
702 q->flows[reduced_hash].srchost = srchost_idx;
703 }
704
705 if (allocate_dst) {
706 dsthost_idx = dsthost_hash % CAKE_QUEUES;
707 inner_hash = dsthost_idx % CAKE_SET_WAYS;
708 outer_hash = dsthost_idx - inner_hash;
709 for (i = 0, k = inner_hash; i < CAKE_SET_WAYS;
710 i++, k = (k + 1) % CAKE_SET_WAYS) {
711 if (q->hosts[outer_hash + k].dsthost_tag ==
712 dsthost_hash)
713 goto found_dst;
714 }
715 for (i = 0; i < CAKE_SET_WAYS;
716 i++, k = (k + 1) % CAKE_SET_WAYS) {
717 if (!q->hosts[outer_hash + k].dsthost_refcnt)
718 break;
719 }
720 q->hosts[outer_hash + k].dsthost_tag = dsthost_hash;
721found_dst:
722 dsthost_idx = outer_hash + k;
723 q->hosts[dsthost_idx].dsthost_refcnt++;
724 q->flows[reduced_hash].dsthost = dsthost_idx;
725 }
726 }
727
728 return reduced_hash;
729}
730
 731/* helper functions: might be changed when/if skbs use a standard list_head */
732/* remove one skb from head of slot queue */
733
734static struct sk_buff *dequeue_head(struct cake_flow *flow)
735{
736 struct sk_buff *skb = flow->head;
737
738 if (skb) {
739 flow->head = skb->next;
740 skb->next = NULL;
741 }
742
743 return skb;
744}
745
746/* add skb to flow queue (tail add) */
747
748static void flow_queue_add(struct cake_flow *flow, struct sk_buff *skb)
749{
750 if (!flow->head)
751 flow->head = skb;
752 else
753 flow->tail->next = skb;
754 flow->tail = skb;
755 skb->next = NULL;
756}
757
 758static struct iphdr *cake_get_iphdr(const struct sk_buff *skb,
759 struct ipv6hdr *buf)
760{
761 unsigned int offset = skb_network_offset(skb);
762 struct iphdr *iph;
763
764 iph = skb_header_pointer(skb, offset, sizeof(struct iphdr), buf);
765
766 if (!iph)
767 return NULL;
768
769 if (iph->version == 4 && iph->protocol == IPPROTO_IPV6)
770 return skb_header_pointer(skb, offset + iph->ihl * 4,
771 sizeof(struct ipv6hdr), buf);
772
773 else if (iph->version == 4)
774 return iph;
775
776 else if (iph->version == 6)
777 return skb_header_pointer(skb, offset, sizeof(struct ipv6hdr),
778 buf);
779
780 return NULL;
781}
782
783static struct tcphdr *cake_get_tcphdr(const struct sk_buff *skb,
784 void *buf, unsigned int bufsize)
785{
786 unsigned int offset = skb_network_offset(skb);
787 const struct ipv6hdr *ipv6h;
788 const struct tcphdr *tcph;
789 const struct iphdr *iph;
790 struct ipv6hdr _ipv6h;
791 struct tcphdr _tcph;
792
793 ipv6h = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
794
795 if (!ipv6h)
796 return NULL;
797
798 if (ipv6h->version == 4) {
799 iph = (struct iphdr *)ipv6h;
800 offset += iph->ihl * 4;
801
802 /* special-case 6in4 tunnelling, as that is a common way to get
803 * v6 connectivity in the home
804 */
805 if (iph->protocol == IPPROTO_IPV6) {
806 ipv6h = skb_header_pointer(skb, offset,
807 sizeof(_ipv6h), &_ipv6h);
808
809 if (!ipv6h || ipv6h->nexthdr != IPPROTO_TCP)
810 return NULL;
811
812 offset += sizeof(struct ipv6hdr);
813
814 } else if (iph->protocol != IPPROTO_TCP) {
815 return NULL;
816 }
817
818 } else if (ipv6h->version == 6) {
819 if (ipv6h->nexthdr != IPPROTO_TCP)
820 return NULL;
821
822 offset += sizeof(struct ipv6hdr);
823 } else {
824 return NULL;
825 }
826
827 tcph = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
828 if (!tcph)
829 return NULL;
830
831 return skb_header_pointer(skb, offset,
832 min(__tcp_hdrlen(tcph), bufsize), buf);
833}
834
835static const void *cake_get_tcpopt(const struct tcphdr *tcph,
836 int code, int *oplen)
837{
838 /* inspired by tcp_parse_options in tcp_input.c */
839 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
840 const u8 *ptr = (const u8 *)(tcph + 1);
841
842 while (length > 0) {
843 int opcode = *ptr++;
844 int opsize;
845
846 if (opcode == TCPOPT_EOL)
847 break;
848 if (opcode == TCPOPT_NOP) {
849 length--;
850 continue;
851 }
852 opsize = *ptr++;
853 if (opsize < 2 || opsize > length)
854 break;
855
856 if (opcode == code) {
857 *oplen = opsize;
858 return ptr;
859 }
860
861 ptr += opsize - 2;
862 length -= opsize;
863 }
864
865 return NULL;
866}
867
868/* Compare two SACK sequences. A sequence is considered greater if it SACKs more
 869 * bytes than the other. In the case where both sequences ACK bytes that the
 870 * other doesn't, A is considered greater. DSACKs in A also make A be
871 * considered greater.
872 *
873 * @return -1, 0 or 1 as normal compare functions
874 */
875static int cake_tcph_sack_compare(const struct tcphdr *tcph_a,
876 const struct tcphdr *tcph_b)
877{
878 const struct tcp_sack_block_wire *sack_a, *sack_b;
879 u32 ack_seq_a = ntohl(tcph_a->ack_seq);
880 u32 bytes_a = 0, bytes_b = 0;
881 int oplen_a, oplen_b;
882 bool first = true;
883
884 sack_a = cake_get_tcpopt(tcph_a, TCPOPT_SACK, &oplen_a);
885 sack_b = cake_get_tcpopt(tcph_b, TCPOPT_SACK, &oplen_b);
886
887 /* pointers point to option contents */
888 oplen_a -= TCPOLEN_SACK_BASE;
889 oplen_b -= TCPOLEN_SACK_BASE;
890
891 if (sack_a && oplen_a >= sizeof(*sack_a) &&
892 (!sack_b || oplen_b < sizeof(*sack_b)))
893 return -1;
894 else if (sack_b && oplen_b >= sizeof(*sack_b) &&
895 (!sack_a || oplen_a < sizeof(*sack_a)))
896 return 1;
897 else if ((!sack_a || oplen_a < sizeof(*sack_a)) &&
898 (!sack_b || oplen_b < sizeof(*sack_b)))
899 return 0;
900
901 while (oplen_a >= sizeof(*sack_a)) {
902 const struct tcp_sack_block_wire *sack_tmp = sack_b;
903 u32 start_a = get_unaligned_be32(&sack_a->start_seq);
904 u32 end_a = get_unaligned_be32(&sack_a->end_seq);
905 int oplen_tmp = oplen_b;
906 bool found = false;
907
908 /* DSACK; always considered greater to prevent dropping */
909 if (before(start_a, ack_seq_a))
910 return -1;
911
912 bytes_a += end_a - start_a;
913
914 while (oplen_tmp >= sizeof(*sack_tmp)) {
915 u32 start_b = get_unaligned_be32(&sack_tmp->start_seq);
916 u32 end_b = get_unaligned_be32(&sack_tmp->end_seq);
917
918 /* first time through we count the total size */
919 if (first)
920 bytes_b += end_b - start_b;
921
922 if (!after(start_b, start_a) && !before(end_b, end_a)) {
923 found = true;
924 if (!first)
925 break;
926 }
927 oplen_tmp -= sizeof(*sack_tmp);
928 sack_tmp++;
929 }
930
931 if (!found)
932 return -1;
933
934 oplen_a -= sizeof(*sack_a);
935 sack_a++;
936 first = false;
937 }
938
939 /* If we made it this far, all ranges SACKed by A are covered by B, so
940 * either the SACKs are equal, or B SACKs more bytes.
941 */
942 return bytes_b > bytes_a ? 1 : 0;
943}
944
945static void cake_tcph_get_tstamp(const struct tcphdr *tcph,
946 u32 *tsval, u32 *tsecr)
947{
948 const u8 *ptr;
949 int opsize;
950
951 ptr = cake_get_tcpopt(tcph, TCPOPT_TIMESTAMP, &opsize);
952
953 if (ptr && opsize == TCPOLEN_TIMESTAMP) {
954 *tsval = get_unaligned_be32(ptr);
955 *tsecr = get_unaligned_be32(ptr + 4);
956 }
957}
958
959static bool cake_tcph_may_drop(const struct tcphdr *tcph,
960 u32 tstamp_new, u32 tsecr_new)
961{
962 /* inspired by tcp_parse_options in tcp_input.c */
963 int length = __tcp_hdrlen(tcph) - sizeof(struct tcphdr);
964 const u8 *ptr = (const u8 *)(tcph + 1);
965 u32 tstamp, tsecr;
966
967 /* 3 reserved flags must be unset to avoid future breakage
968 * ACK must be set
969 * ECE/CWR are handled separately
970 * All other flags URG/PSH/RST/SYN/FIN must be unset
971 * 0x0FFF0000 = all TCP flags (confirm ACK=1, others zero)
972 * 0x00C00000 = CWR/ECE (handled separately)
973 * 0x0F3F0000 = 0x0FFF0000 & ~0x00C00000
974 */
975 if (((tcp_flag_word(tcph) &
976 cpu_to_be32(0x0F3F0000)) != TCP_FLAG_ACK))
977 return false;
978
979 while (length > 0) {
980 int opcode = *ptr++;
981 int opsize;
982
983 if (opcode == TCPOPT_EOL)
984 break;
985 if (opcode == TCPOPT_NOP) {
986 length--;
987 continue;
988 }
989 opsize = *ptr++;
990 if (opsize < 2 || opsize > length)
991 break;
992
993 switch (opcode) {
994 case TCPOPT_MD5SIG: /* doesn't influence state */
995 break;
996
997 case TCPOPT_SACK: /* stricter checking performed later */
998 if (opsize % 8 != 2)
999 return false;
1000 break;
1001
1002 case TCPOPT_TIMESTAMP:
1003 /* only drop timestamps lower than new */
1004 if (opsize != TCPOLEN_TIMESTAMP)
1005 return false;
1006 tstamp = get_unaligned_be32(ptr);
1007 tsecr = get_unaligned_be32(ptr + 4);
1008 if (after(tstamp, tstamp_new) ||
1009 after(tsecr, tsecr_new))
1010 return false;
1011 break;
1012
1013 case TCPOPT_MSS: /* these should only be set on SYN */
1014 case TCPOPT_WINDOW:
1015 case TCPOPT_SACK_PERM:
1016 case TCPOPT_FASTOPEN:
1017 case TCPOPT_EXP:
1018 default: /* don't drop if any unknown options are present */
1019 return false;
1020 }
1021
1022 ptr += opsize - 2;
1023 length -= opsize;
1024 }
1025
1026 return true;
1027}
1028
1029static struct sk_buff *cake_ack_filter(struct cake_sched_data *q,
1030 struct cake_flow *flow)
1031{
1032 bool aggressive = q->ack_filter == CAKE_ACK_AGGRESSIVE;
1033 struct sk_buff *elig_ack = NULL, *elig_ack_prev = NULL;
1034 struct sk_buff *skb_check, *skb_prev = NULL;
1035 const struct ipv6hdr *ipv6h, *ipv6h_check;
1036 unsigned char _tcph[64], _tcph_check[64];
1037 const struct tcphdr *tcph, *tcph_check;
1038 const struct iphdr *iph, *iph_check;
1039 struct ipv6hdr _iph, _iph_check;
1040 const struct sk_buff *skb;
1041 int seglen, num_found = 0;
1042 u32 tstamp = 0, tsecr = 0;
1043 __be32 elig_flags = 0;
1044 int sack_comp;
1045
1046 /* no other possible ACKs to filter */
1047 if (flow->head == flow->tail)
1048 return NULL;
1049
1050 skb = flow->tail;
1051 tcph = cake_get_tcphdr(skb, _tcph, sizeof(_tcph));
1052 iph = cake_get_iphdr(skb, &_iph);
1053 if (!tcph)
1054 return NULL;
1055
1056 cake_tcph_get_tstamp(tcph, &tstamp, &tsecr);
1057
1058 /* the 'triggering' packet need only have the ACK flag set.
1059 * also check that SYN is not set, as there won't be any previous ACKs.
1060 */
1061 if ((tcp_flag_word(tcph) &
1062 (TCP_FLAG_ACK | TCP_FLAG_SYN)) != TCP_FLAG_ACK)
1063 return NULL;
1064
 1065 /* the 'triggering' ACK is at the tail of the queue; we have already
 1066 * returned if it is the only packet in the flow. Loop through the rest
1067 * of the queue looking for pure ACKs with the same 5-tuple as the
1068 * triggering one.
1069 */
1070 for (skb_check = flow->head;
1071 skb_check && skb_check != skb;
1072 skb_prev = skb_check, skb_check = skb_check->next) {
1073 iph_check = cake_get_iphdr(skb_check, &_iph_check);
1074 tcph_check = cake_get_tcphdr(skb_check, &_tcph_check,
1075 sizeof(_tcph_check));
1076
1077 /* only TCP packets with matching 5-tuple are eligible, and only
1078 * drop safe headers
1079 */
1080 if (!tcph_check || iph->version != iph_check->version ||
1081 tcph_check->source != tcph->source ||
1082 tcph_check->dest != tcph->dest)
1083 continue;
1084
1085 if (iph_check->version == 4) {
1086 if (iph_check->saddr != iph->saddr ||
1087 iph_check->daddr != iph->daddr)
1088 continue;
1089
1090 seglen = ntohs(iph_check->tot_len) -
1091 (4 * iph_check->ihl);
1092 } else if (iph_check->version == 6) {
1093 ipv6h = (struct ipv6hdr *)iph;
1094 ipv6h_check = (struct ipv6hdr *)iph_check;
1095
1096 if (ipv6_addr_cmp(&ipv6h_check->saddr, &ipv6h->saddr) ||
1097 ipv6_addr_cmp(&ipv6h_check->daddr, &ipv6h->daddr))
1098 continue;
1099
1100 seglen = ntohs(ipv6h_check->payload_len);
1101 } else {
1102 WARN_ON(1); /* shouldn't happen */
1103 continue;
1104 }
1105
1106 /* If the ECE/CWR flags changed from the previous eligible
1107 * packet in the same flow, we should no longer be dropping that
1108 * previous packet as this would lose information.
1109 */
1110 if (elig_ack && (tcp_flag_word(tcph_check) &
1111 (TCP_FLAG_ECE | TCP_FLAG_CWR)) != elig_flags) {
1112 elig_ack = NULL;
1113 elig_ack_prev = NULL;
1114 num_found--;
1115 }
1116
1117 /* Check TCP options and flags, don't drop ACKs with segment
1118 * data, and don't drop ACKs with a higher cumulative ACK
1119 * counter than the triggering packet. Check ACK seqno here to
1120 * avoid parsing SACK options of packets we are going to exclude
1121 * anyway.
1122 */
1123 if (!cake_tcph_may_drop(tcph_check, tstamp, tsecr) ||
1124 (seglen - __tcp_hdrlen(tcph_check)) != 0 ||
1125 after(ntohl(tcph_check->ack_seq), ntohl(tcph->ack_seq)))
1126 continue;
1127
1128 /* Check SACK options. The triggering packet must SACK more data
1129 * than the ACK under consideration, or SACK the same range but
1130 * have a larger cumulative ACK counter. The latter is a
1131 * pathological case, but is contained in the following check
1132 * anyway, just to be safe.
1133 */
1134 sack_comp = cake_tcph_sack_compare(tcph_check, tcph);
1135
1136 if (sack_comp < 0 ||
1137 (ntohl(tcph_check->ack_seq) == ntohl(tcph->ack_seq) &&
1138 sack_comp == 0))
1139 continue;
1140
1141 /* At this point we have found an eligible pure ACK to drop; if
1142 * we are in aggressive mode, we are done. Otherwise, keep
1143 * searching unless this is the second eligible ACK we
1144 * found.
1145 *
1146 * Since we want to drop ACK closest to the head of the queue,
1147 * save the first eligible ACK we find, even if we need to loop
1148 * again.
1149 */
1150 if (!elig_ack) {
1151 elig_ack = skb_check;
1152 elig_ack_prev = skb_prev;
1153 elig_flags = (tcp_flag_word(tcph_check)
1154 & (TCP_FLAG_ECE | TCP_FLAG_CWR));
1155 }
1156
1157 if (num_found++ > 0)
1158 goto found;
1159 }
1160
 1161 /* We made it through the queue without finding two eligible ACKs. If
1162 * we found a single eligible ACK we can drop it in aggressive mode if
1163 * we can guarantee that this does not interfere with ECN flag
1164 * information. We ensure this by dropping it only if the enqueued
1165 * packet is consecutive with the eligible ACK, and their flags match.
1166 */
1167 if (elig_ack && aggressive && elig_ack->next == skb &&
1168 (elig_flags == (tcp_flag_word(tcph) &
1169 (TCP_FLAG_ECE | TCP_FLAG_CWR))))
1170 goto found;
1171
1172 return NULL;
1173
1174found:
1175 if (elig_ack_prev)
1176 elig_ack_prev->next = elig_ack->next;
1177 else
1178 flow->head = elig_ack->next;
1179
1180 elig_ack->next = NULL;
1181
1182 return elig_ack;
1183}
1184
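/* Exponentially-weighted moving average: avg moves towards sample by
 * 1/2^shift of the difference on each update (up to fixed-point truncation).
 */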
 1185static u64 cake_ewma(u64 avg, u64 sample, u32 shift)
1186{
1187 avg -= avg >> shift;
1188 avg += sample >> shift;
1189 return avg;
1190}
1191
1192static void cake_heap_swap(struct cake_sched_data *q, u16 i, u16 j)
1193{
1194 struct cake_heap_entry ii = q->overflow_heap[i];
1195 struct cake_heap_entry jj = q->overflow_heap[j];
1196
1197 q->overflow_heap[i] = jj;
1198 q->overflow_heap[j] = ii;
1199
1200 q->tins[ii.t].overflow_idx[ii.b] = j;
1201 q->tins[jj.t].overflow_idx[jj.b] = i;
1202}
1203
1204static u32 cake_heap_get_backlog(const struct cake_sched_data *q, u16 i)
1205{
1206 struct cake_heap_entry ii = q->overflow_heap[i];
1207
1208 return q->tins[ii.t].backlogs[ii.b];
1209}
1210
1211static void cake_heapify(struct cake_sched_data *q, u16 i)
1212{
1213 static const u32 a = CAKE_MAX_TINS * CAKE_QUEUES;
1214 u32 mb = cake_heap_get_backlog(q, i);
1215 u32 m = i;
1216
1217 while (m < a) {
1218 u32 l = m + m + 1;
1219 u32 r = l + 1;
1220
1221 if (l < a) {
1222 u32 lb = cake_heap_get_backlog(q, l);
1223
1224 if (lb > mb) {
1225 m = l;
1226 mb = lb;
1227 }
1228 }
1229
1230 if (r < a) {
1231 u32 rb = cake_heap_get_backlog(q, r);
1232
1233 if (rb > mb) {
1234 m = r;
1235 mb = rb;
1236 }
1237 }
1238
1239 if (m != i) {
1240 cake_heap_swap(q, i, m);
1241 i = m;
1242 } else {
1243 break;
1244 }
1245 }
1246}
1247
1248static void cake_heapify_up(struct cake_sched_data *q, u16 i)
1249{
1250 while (i > 0 && i < CAKE_MAX_TINS * CAKE_QUEUES) {
1251 u16 p = (i - 1) >> 1;
1252 u32 ib = cake_heap_get_backlog(q, i);
1253 u32 pb = cake_heap_get_backlog(q, p);
1254
1255 if (ib > pb) {
1256 cake_heap_swap(q, i, p);
1257 i = p;
1258 } else {
1259 break;
1260 }
1261 }
1262}
1263
1264static int cake_advance_shaper(struct cake_sched_data *q,
1265 struct cake_tin_data *b,
1266 struct sk_buff *skb,
1267 ktime_t now, bool drop)
1268{
1269 u32 len = qdisc_pkt_len(skb);
1270
1271 /* charge packet bandwidth to this tin
1272 * and to the global shaper.
1273 */
1274 if (q->rate_ns) {
1275 u64 tin_dur = (len * b->tin_rate_ns) >> b->tin_rate_shft;
1276 u64 global_dur = (len * q->rate_ns) >> q->rate_shft;
1277 u64 failsafe_dur = global_dur + (global_dur >> 1);
1278
1279 if (ktime_before(b->time_next_packet, now))
1280 b->time_next_packet = ktime_add_ns(b->time_next_packet,
1281 tin_dur);
1282
1283 else if (ktime_before(b->time_next_packet,
1284 ktime_add_ns(now, tin_dur)))
1285 b->time_next_packet = ktime_add_ns(now, tin_dur);
1286
1287 q->time_next_packet = ktime_add_ns(q->time_next_packet,
1288 global_dur);
1289 if (!drop)
1290 q->failsafe_next_packet = \
1291 ktime_add_ns(q->failsafe_next_packet,
1292 failsafe_dur);
1293 }
1294 return len;
1295}
1296
1297static unsigned int cake_drop(struct Qdisc *sch, struct sk_buff **to_free)
1298{
1299 struct cake_sched_data *q = qdisc_priv(sch);
1300 ktime_t now = ktime_get();
1301 u32 idx = 0, tin = 0, len;
1302 struct cake_heap_entry qq;
1303 struct cake_tin_data *b;
1304 struct cake_flow *flow;
1305 struct sk_buff *skb;
1306
1307 if (!q->overflow_timeout) {
1308 int i;
1309 /* Build fresh max-heap */
1310 for (i = CAKE_MAX_TINS * CAKE_QUEUES / 2; i >= 0; i--)
1311 cake_heapify(q, i);
1312 }
1313 q->overflow_timeout = 65535;
1314
1315 /* select longest queue for pruning */
1316 qq = q->overflow_heap[0];
1317 tin = qq.t;
1318 idx = qq.b;
1319
1320 b = &q->tins[tin];
1321 flow = &b->flows[idx];
1322 skb = dequeue_head(flow);
1323 if (unlikely(!skb)) {
1324 /* heap has gone wrong, rebuild it next time */
1325 q->overflow_timeout = 0;
1326 return idx + (tin << 16);
1327 }
1328
1329 if (cobalt_queue_full(&flow->cvars, &b->cparams, now))
1330 b->unresponsive_flow_count++;
1331
1332 len = qdisc_pkt_len(skb);
1333 q->buffer_used -= skb->truesize;
1334 b->backlogs[idx] -= len;
1335 b->tin_backlog -= len;
1336 sch->qstats.backlog -= len;
1337 qdisc_tree_reduce_backlog(sch, 1, len);
1338
1339 flow->dropped++;
1340 b->tin_dropped++;
1341 sch->qstats.drops++;
1342
 1343 if (q->rate_flags & CAKE_FLAG_INGRESS)
1344 cake_advance_shaper(q, b, skb, now, true);
1345
 1346 __qdisc_drop(skb, to_free);
1347 sch->q.qlen--;
1348
1349 cake_heapify(q, 0);
1350
1351 return idx + (tin << 16);
1352}
1353
1354static u32 cake_classify(struct Qdisc *sch, struct cake_tin_data *t,
1355 struct sk_buff *skb, int flow_mode, int *qerr)
1356{
1357 struct cake_sched_data *q = qdisc_priv(sch);
1358 struct tcf_proto *filter;
1359 struct tcf_result res;
1360 int result;
1361
1362 filter = rcu_dereference_bh(q->filter_list);
1363 if (!filter)
1364 return cake_hash(t, skb, flow_mode) + 1;
1365
1366 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
1367 result = tcf_classify(skb, filter, &res, false);
1368 if (result >= 0) {
1369#ifdef CONFIG_NET_CLS_ACT
1370 switch (result) {
1371 case TC_ACT_STOLEN:
1372 case TC_ACT_QUEUED:
1373 case TC_ACT_TRAP:
1374 *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
1375 /* fall through */
1376 case TC_ACT_SHOT:
1377 return 0;
1378 }
1379#endif
1380 if (TC_H_MIN(res.classid) <= CAKE_QUEUES)
1381 return TC_H_MIN(res.classid);
1382 }
1383 return 0;
1384}
1385
 1386static void cake_reconfigure(struct Qdisc *sch);
1387
 1388static s32 cake_enqueue(struct sk_buff *skb, struct Qdisc *sch,
1389 struct sk_buff **to_free)
1390{
1391 struct cake_sched_data *q = qdisc_priv(sch);
1392 int len = qdisc_pkt_len(skb);
1393 int uninitialized_var(ret);
 1394 struct sk_buff *ack = NULL;
 1395 ktime_t now = ktime_get();
1396 struct cake_tin_data *b;
1397 struct cake_flow *flow;
1398 u32 idx, tin;
1399
1400 tin = 0;
1401 b = &q->tins[tin];
1402
1403 /* choose flow to insert into */
1404 idx = cake_classify(sch, b, skb, q->flow_mode, &ret);
1405 if (idx == 0) {
1406 if (ret & __NET_XMIT_BYPASS)
1407 qdisc_qstats_drop(sch);
1408 __qdisc_drop(skb, to_free);
1409 return ret;
1410 }
1411 idx--;
1412 flow = &b->flows[idx];
1413
1414 /* ensure shaper state isn't stale */
1415 if (!b->tin_backlog) {
1416 if (ktime_before(b->time_next_packet, now))
1417 b->time_next_packet = now;
1418
1419 if (!sch->q.qlen) {
1420 if (ktime_before(q->time_next_packet, now)) {
1421 q->failsafe_next_packet = now;
1422 q->time_next_packet = now;
1423 } else if (ktime_after(q->time_next_packet, now) &&
1424 ktime_after(q->failsafe_next_packet, now)) {
1425 u64 next = \
1426 min(ktime_to_ns(q->time_next_packet),
1427 ktime_to_ns(
1428 q->failsafe_next_packet));
1429 sch->qstats.overlimits++;
1430 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1431 }
1432 }
1433 }
1434
1435 if (unlikely(len > b->max_skblen))
1436 b->max_skblen = len;
1437
1438 cobalt_set_enqueue_time(skb, now);
1439 flow_queue_add(flow, skb);
1440
 1441 if (q->ack_filter)
1442 ack = cake_ack_filter(q, flow);
1443
1444 if (ack) {
1445 b->ack_drops++;
1446 sch->qstats.drops++;
1447 b->bytes += qdisc_pkt_len(ack);
1448 len -= qdisc_pkt_len(ack);
1449 q->buffer_used += skb->truesize - ack->truesize;
1450 if (q->rate_flags & CAKE_FLAG_INGRESS)
1451 cake_advance_shaper(q, b, ack, now, true);
1452
1453 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(ack));
1454 consume_skb(ack);
1455 } else {
1456 sch->q.qlen++;
1457 q->buffer_used += skb->truesize;
1458 }
 1459
1460 /* stats */
1461 b->packets++;
1462 b->bytes += len;
1463 b->backlogs[idx] += len;
1464 b->tin_backlog += len;
1465 sch->qstats.backlog += len;
1466 q->avg_window_bytes += len;
1467
1468 if (q->overflow_timeout)
1469 cake_heapify_up(q, b->overflow_idx[idx]);
1470
1471 /* incoming bandwidth capacity estimate */
 1472 if (q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS) {
1473 u64 packet_interval = \
1474 ktime_to_ns(ktime_sub(now, q->last_packet_time));
1475
1476 if (packet_interval > NSEC_PER_SEC)
1477 packet_interval = NSEC_PER_SEC;
1478
1479 /* filter out short-term bursts, eg. wifi aggregation */
1480 q->avg_packet_interval = \
1481 cake_ewma(q->avg_packet_interval,
1482 packet_interval,
1483 (packet_interval > q->avg_packet_interval ?
1484 2 : 8));
1485
1486 q->last_packet_time = now;
1487
1488 if (packet_interval > q->avg_packet_interval) {
1489 u64 window_interval = \
1490 ktime_to_ns(ktime_sub(now,
1491 q->avg_window_begin));
1492 u64 b = q->avg_window_bytes * (u64)NSEC_PER_SEC;
1493
1494 do_div(b, window_interval);
1495 q->avg_peak_bandwidth =
1496 cake_ewma(q->avg_peak_bandwidth, b,
1497 b > q->avg_peak_bandwidth ? 2 : 8);
1498 q->avg_window_bytes = 0;
1499 q->avg_window_begin = now;
1500
1501 if (ktime_after(now,
1502 ktime_add_ms(q->last_reconfig_time,
1503 250))) {
1504 q->rate_bps = (q->avg_peak_bandwidth * 15) >> 4;
1505 cake_reconfigure(sch);
1506 }
1507 }
1508 } else {
1509 q->avg_window_bytes = 0;
1510 q->last_packet_time = now;
1511 }
 1512
1513 /* flowchain */
1514 if (!flow->set || flow->set == CAKE_SET_DECAYING) {
1515 struct cake_host *srchost = &b->hosts[flow->srchost];
1516 struct cake_host *dsthost = &b->hosts[flow->dsthost];
1517 u16 host_load = 1;
1518
1519 if (!flow->set) {
1520 list_add_tail(&flow->flowchain, &b->new_flows);
1521 } else {
1522 b->decaying_flow_count--;
1523 list_move_tail(&flow->flowchain, &b->new_flows);
1524 }
1525 flow->set = CAKE_SET_SPARSE;
1526 b->sparse_flow_count++;
1527
1528 if (cake_dsrc(q->flow_mode))
1529 host_load = max(host_load, srchost->srchost_refcnt);
1530
1531 if (cake_ddst(q->flow_mode))
1532 host_load = max(host_load, dsthost->dsthost_refcnt);
1533
1534 flow->deficit = (b->flow_quantum *
1535 quantum_div[host_load]) >> 16;
1536 } else if (flow->set == CAKE_SET_SPARSE_WAIT) {
1537 /* this flow was empty, accounted as a sparse flow, but actually
1538 * in the bulk rotation.
1539 */
1540 flow->set = CAKE_SET_BULK;
1541 b->sparse_flow_count--;
1542 b->bulk_flow_count++;
1543 }
1544
1545 if (q->buffer_used > q->buffer_max_used)
1546 q->buffer_max_used = q->buffer_used;
1547
1548 if (q->buffer_used > q->buffer_limit) {
1549 u32 dropped = 0;
1550
1551 while (q->buffer_used > q->buffer_limit) {
1552 dropped++;
1553 cake_drop(sch, to_free);
1554 }
1555 b->drop_overlimit += dropped;
1556 }
1557 return NET_XMIT_SUCCESS;
1558}
1559
1560static struct sk_buff *cake_dequeue_one(struct Qdisc *sch)
1561{
1562 struct cake_sched_data *q = qdisc_priv(sch);
1563 struct cake_tin_data *b = &q->tins[q->cur_tin];
1564 struct cake_flow *flow = &b->flows[q->cur_flow];
1565 struct sk_buff *skb = NULL;
1566 u32 len;
1567
1568 if (flow->head) {
1569 skb = dequeue_head(flow);
1570 len = qdisc_pkt_len(skb);
1571 b->backlogs[q->cur_flow] -= len;
1572 b->tin_backlog -= len;
1573 sch->qstats.backlog -= len;
1574 q->buffer_used -= skb->truesize;
1575 sch->q.qlen--;
1576
1577 if (q->overflow_timeout)
1578 cake_heapify(q, b->overflow_idx[q->cur_flow]);
1579 }
1580 return skb;
1581}
1582
1583/* Discard leftover packets from a tin no longer in use. */
1584static void cake_clear_tin(struct Qdisc *sch, u16 tin)
1585{
1586 struct cake_sched_data *q = qdisc_priv(sch);
1587 struct sk_buff *skb;
1588
1589 q->cur_tin = tin;
1590 for (q->cur_flow = 0; q->cur_flow < CAKE_QUEUES; q->cur_flow++)
1591 while (!!(skb = cake_dequeue_one(sch)))
1592 kfree_skb(skb);
1593}
1594
1595static struct sk_buff *cake_dequeue(struct Qdisc *sch)
1596{
1597 struct cake_sched_data *q = qdisc_priv(sch);
1598 struct cake_tin_data *b = &q->tins[q->cur_tin];
1599 struct cake_host *srchost, *dsthost;
1600 ktime_t now = ktime_get();
1601 struct cake_flow *flow;
1602 struct list_head *head;
1603 bool first_flow = true;
1604 struct sk_buff *skb;
1605 u16 host_load;
1606 u64 delay;
1607 u32 len;
1608
1609begin:
1610 if (!sch->q.qlen)
1611 return NULL;
1612
1613 /* global hard shaper */
1614 if (ktime_after(q->time_next_packet, now) &&
1615 ktime_after(q->failsafe_next_packet, now)) {
1616 u64 next = min(ktime_to_ns(q->time_next_packet),
1617 ktime_to_ns(q->failsafe_next_packet));
1618
1619 sch->qstats.overlimits++;
1620 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1621 return NULL;
1622 }
1623
1624 /* Choose a class to work on. */
1625 if (!q->rate_ns) {
1626 /* In unlimited mode, can't rely on shaper timings, just balance
1627 * with DRR
1628 */
1629 bool wrapped = false, empty = true;
1630
1631 while (b->tin_deficit < 0 ||
1632 !(b->sparse_flow_count + b->bulk_flow_count)) {
1633 if (b->tin_deficit <= 0)
1634 b->tin_deficit += b->tin_quantum_band;
1635 if (b->sparse_flow_count + b->bulk_flow_count)
1636 empty = false;
1637
1638 q->cur_tin++;
1639 b++;
1640 if (q->cur_tin >= q->tin_cnt) {
1641 q->cur_tin = 0;
1642 b = q->tins;
1643
1644 if (wrapped) {
1645 /* It's possible for q->qlen to be
1646 * nonzero when we actually have no
1647 * packets anywhere.
1648 */
1649 if (empty)
1650 return NULL;
1651 } else {
1652 wrapped = true;
1653 }
1654 }
1655 }
1656 } else {
1657 /* In shaped mode, choose:
1658 * - Highest-priority tin with queue and meeting schedule, or
1659 * - The earliest-scheduled tin with queue.
1660 */
1661 ktime_t best_time = KTIME_MAX;
1662 int tin, best_tin = 0;
1663
1664 for (tin = 0; tin < q->tin_cnt; tin++) {
1665 b = q->tins + tin;
1666 if ((b->sparse_flow_count + b->bulk_flow_count) > 0) {
1667 ktime_t time_to_pkt = \
1668 ktime_sub(b->time_next_packet, now);
1669
1670 if (ktime_to_ns(time_to_pkt) <= 0 ||
1671 ktime_compare(time_to_pkt,
1672 best_time) <= 0) {
1673 best_time = time_to_pkt;
1674 best_tin = tin;
1675 }
1676 }
1677 }
1678
1679 q->cur_tin = best_tin;
1680 b = q->tins + best_tin;
1681
1682 /* No point in going further if no packets to deliver. */
1683 if (unlikely(!(b->sparse_flow_count + b->bulk_flow_count)))
1684 return NULL;
1685 }
1686
1687retry:
1688 /* service this class */
1689 head = &b->decaying_flows;
1690 if (!first_flow || list_empty(head)) {
1691 head = &b->new_flows;
1692 if (list_empty(head)) {
1693 head = &b->old_flows;
1694 if (unlikely(list_empty(head))) {
1695 head = &b->decaying_flows;
1696 if (unlikely(list_empty(head)))
1697 goto begin;
1698 }
1699 }
1700 }
1701 flow = list_first_entry(head, struct cake_flow, flowchain);
1702 q->cur_flow = flow - b->flows;
1703 first_flow = false;
1704
1705 /* triple isolation (modified DRR++) */
1706 srchost = &b->hosts[flow->srchost];
1707 dsthost = &b->hosts[flow->dsthost];
1708 host_load = 1;
1709
1710 if (cake_dsrc(q->flow_mode))
1711 host_load = max(host_load, srchost->srchost_refcnt);
1712
1713 if (cake_ddst(q->flow_mode))
1714 host_load = max(host_load, dsthost->dsthost_refcnt);
1715
1716 WARN_ON(host_load > CAKE_QUEUES);
1717
1718 /* flow isolation (DRR++) */
1719 if (flow->deficit <= 0) {
1720 /* The shifted prandom_u32() is a way to apply dithering to
1721 * avoid accumulating roundoff errors
1722 */
1723 flow->deficit += (b->flow_quantum * quantum_div[host_load] +
1724 (prandom_u32() >> 16)) >> 16;
1725 list_move_tail(&flow->flowchain, &b->old_flows);
1726
1727 /* Keep all flows with deficits out of the sparse and decaying
1728 * rotations. No non-empty flow can go into the decaying
1729 * rotation, so they can't get deficits
1730 */
1731 if (flow->set == CAKE_SET_SPARSE) {
1732 if (flow->head) {
1733 b->sparse_flow_count--;
1734 b->bulk_flow_count++;
1735 flow->set = CAKE_SET_BULK;
1736 } else {
1737 /* we've moved it to the bulk rotation for
1738 * correct deficit accounting but we still want
1739 * to count it as a sparse flow, not a bulk one.
1740 */
1741 flow->set = CAKE_SET_SPARSE_WAIT;
1742 }
1743 }
1744 goto retry;
1745 }
1746
1747 /* Retrieve a packet via the AQM */
1748 while (1) {
1749 skb = cake_dequeue_one(sch);
1750 if (!skb) {
1751 /* this queue was actually empty */
1752 if (cobalt_queue_empty(&flow->cvars, &b->cparams, now))
1753 b->unresponsive_flow_count--;
1754
1755 if (flow->cvars.p_drop || flow->cvars.count ||
1756 ktime_before(now, flow->cvars.drop_next)) {
1757 /* keep in the flowchain until the state has
1758 * decayed to rest
1759 */
1760 list_move_tail(&flow->flowchain,
1761 &b->decaying_flows);
1762 if (flow->set == CAKE_SET_BULK) {
1763 b->bulk_flow_count--;
1764 b->decaying_flow_count++;
1765 } else if (flow->set == CAKE_SET_SPARSE ||
1766 flow->set == CAKE_SET_SPARSE_WAIT) {
1767 b->sparse_flow_count--;
1768 b->decaying_flow_count++;
1769 }
1770 flow->set = CAKE_SET_DECAYING;
1771 } else {
1772 /* remove empty queue from the flowchain */
1773 list_del_init(&flow->flowchain);
1774 if (flow->set == CAKE_SET_SPARSE ||
1775 flow->set == CAKE_SET_SPARSE_WAIT)
1776 b->sparse_flow_count--;
1777 else if (flow->set == CAKE_SET_BULK)
1778 b->bulk_flow_count--;
1779 else
1780 b->decaying_flow_count--;
1781
1782 flow->set = CAKE_SET_NONE;
1783 srchost->srchost_refcnt--;
1784 dsthost->dsthost_refcnt--;
1785 }
1786 goto begin;
1787 }
1788
1789 /* Last packet in queue may be marked, shouldn't be dropped */
 1790 if (!cobalt_should_drop(&flow->cvars, &b->cparams, now, skb,
1791 (b->bulk_flow_count *
1792 !!(q->rate_flags &
1793 CAKE_FLAG_INGRESS))) ||
 1794 !flow->head)
1795 break;
1796
 1797 /* drop this packet, get another one */
1798 if (q->rate_flags & CAKE_FLAG_INGRESS) {
1799 len = cake_advance_shaper(q, b, skb,
1800 now, true);
1801 flow->deficit -= len;
1802 b->tin_deficit -= len;
1803 }
 1804 flow->dropped++;
1805 b->tin_dropped++;
1806 qdisc_tree_reduce_backlog(sch, 1, qdisc_pkt_len(skb));
1807 qdisc_qstats_drop(sch);
1808 kfree_skb(skb);
 1809 if (q->rate_flags & CAKE_FLAG_INGRESS)
1810 goto retry;
 1811 }
1812
1813 b->tin_ecn_mark += !!flow->cvars.ecn_marked;
1814 qdisc_bstats_update(sch, skb);
1815
1816 /* collect delay stats */
1817 delay = ktime_to_ns(ktime_sub(now, cobalt_get_enqueue_time(skb)));
1818 b->avge_delay = cake_ewma(b->avge_delay, delay, 8);
1819 b->peak_delay = cake_ewma(b->peak_delay, delay,
1820 delay > b->peak_delay ? 2 : 8);
1821 b->base_delay = cake_ewma(b->base_delay, delay,
1822 delay < b->base_delay ? 2 : 8);
1823
1824 len = cake_advance_shaper(q, b, skb, now, false);
1825 flow->deficit -= len;
1826 b->tin_deficit -= len;
1827
1828 if (ktime_after(q->time_next_packet, now) && sch->q.qlen) {
1829 u64 next = min(ktime_to_ns(q->time_next_packet),
1830 ktime_to_ns(q->failsafe_next_packet));
1831
1832 qdisc_watchdog_schedule_ns(&q->watchdog, next);
1833 } else if (!sch->q.qlen) {
1834 int i;
1835
1836 for (i = 0; i < q->tin_cnt; i++) {
1837 if (q->tins[i].decaying_flow_count) {
1838 ktime_t next = \
1839 ktime_add_ns(now,
1840 q->tins[i].cparams.target);
1841
1842 qdisc_watchdog_schedule_ns(&q->watchdog,
1843 ktime_to_ns(next));
1844 break;
1845 }
1846 }
1847 }
1848
1849 if (q->overflow_timeout)
1850 q->overflow_timeout--;
1851
1852 return skb;
1853}
1854
1855static void cake_reset(struct Qdisc *sch)
1856{
1857 u32 c;
1858
1859 for (c = 0; c < CAKE_MAX_TINS; c++)
1860 cake_clear_tin(sch, c);
1861}
1862
1863static const struct nla_policy cake_policy[TCA_CAKE_MAX + 1] = {
1864 [TCA_CAKE_BASE_RATE64] = { .type = NLA_U64 },
1865 [TCA_CAKE_DIFFSERV_MODE] = { .type = NLA_U32 },
1866 [TCA_CAKE_ATM] = { .type = NLA_U32 },
1867 [TCA_CAKE_FLOW_MODE] = { .type = NLA_U32 },
1868 [TCA_CAKE_OVERHEAD] = { .type = NLA_S32 },
1869 [TCA_CAKE_RTT] = { .type = NLA_U32 },
1870 [TCA_CAKE_TARGET] = { .type = NLA_U32 },
1871 [TCA_CAKE_AUTORATE] = { .type = NLA_U32 },
1872 [TCA_CAKE_MEMORY] = { .type = NLA_U32 },
1873 [TCA_CAKE_NAT] = { .type = NLA_U32 },
1874 [TCA_CAKE_RAW] = { .type = NLA_U32 },
1875 [TCA_CAKE_WASH] = { .type = NLA_U32 },
1876 [TCA_CAKE_MPU] = { .type = NLA_U32 },
1877 [TCA_CAKE_INGRESS] = { .type = NLA_U32 },
1878 [TCA_CAKE_ACK_FILTER] = { .type = NLA_U32 },
1879};
1880
1881static void cake_set_rate(struct cake_tin_data *b, u64 rate, u32 mtu,
1882 u64 target_ns, u64 rtt_est_ns)
1883{
1884 /* convert byte-rate into time-per-byte
1885 * so it will always unwedge in reasonable time.
1886 */
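 /* Fixed-point sketch of the values computed below: rate_ns is scaled so
  * that (rate_ns >> rate_shft) ~= NSEC_PER_SEC / rate, i.e. the time to
  * send one byte in ns; (len * rate_ns) >> rate_shft, as used by the
  * shaper, is then the serialisation time of len bytes.
  */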
1887 static const u64 MIN_RATE = 64;
1888 u32 byte_target = mtu;
1889 u64 byte_target_ns;
1890 u8 rate_shft = 0;
1891 u64 rate_ns = 0;
1892
1893 b->flow_quantum = 1514;
1894 if (rate) {
1895 b->flow_quantum = max(min(rate >> 12, 1514ULL), 300ULL);
1896 rate_shft = 34;
1897 rate_ns = ((u64)NSEC_PER_SEC) << rate_shft;
1898 rate_ns = div64_u64(rate_ns, max(MIN_RATE, rate));
1899 while (!!(rate_ns >> 34)) {
1900 rate_ns >>= 1;
1901 rate_shft--;
1902 }
1903 } /* else unlimited, ie. zero delay */
1904
1905 b->tin_rate_bps = rate;
1906 b->tin_rate_ns = rate_ns;
1907 b->tin_rate_shft = rate_shft;
1908
1909 byte_target_ns = (byte_target * rate_ns) >> rate_shft;
1910
1911 b->cparams.target = max((byte_target_ns * 3) / 2, target_ns);
1912 b->cparams.interval = max(rtt_est_ns +
1913 b->cparams.target - target_ns,
1914 b->cparams.target * 2);
1915 b->cparams.mtu_time = byte_target_ns;
1916 b->cparams.p_inc = 1 << 24; /* 1/256 */
1917 b->cparams.p_dec = 1 << 20; /* 1/4096 */
1918}
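
/* Worked example for cake_set_rate() (illustrative only, assuming "rate" is
 * a byte rate as the comment above indicates): at 100 Mbit/s,
 * rate = 12,500,000 bytes/s, so the initial
 *   rate_ns = (NSEC_PER_SEC << 34) / rate ~= 80 << 34.
 * The normalisation loop shifts this down seven times, leaving
 *   rate_ns ~= 80 << 27, rate_shft = 27,
 * so the serialisation time of a packet, (len * rate_ns) >> rate_shft, is
 * ~80 ns per byte.  For a 1514-byte MTU, byte_target_ns is then ~121 us and
 * cparams.target becomes max(~182 us, target_ns), i.e. the configured
 * target unless the link is slow enough for one MTU's worth of data to
 * dominate it.
 */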
1919
1920static void cake_reconfigure(struct Qdisc *sch)
1921{
1922 struct cake_sched_data *q = qdisc_priv(sch);
1923 struct cake_tin_data *b = &q->tins[0];
1924 int c, ft = 0;
1925
1926 q->tin_cnt = 1;
1927 cake_set_rate(b, q->rate_bps, psched_mtu(qdisc_dev(sch)),
1928 us_to_ns(q->target), us_to_ns(q->interval));
1929 b->tin_quantum_band = 65535;
1930 b->tin_quantum_prio = 65535;
1931
1932 for (c = q->tin_cnt; c < CAKE_MAX_TINS; c++) {
1933 cake_clear_tin(sch, c);
1934 q->tins[c].cparams.mtu_time = q->tins[ft].cparams.mtu_time;
1935 }
1936
1937 q->rate_ns = q->tins[ft].tin_rate_ns;
1938 q->rate_shft = q->tins[ft].tin_rate_shft;
1939
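	/* With no explicit memory limit, the automatic limit below is about
	 * four "interval"s worth of bytes at the configured rate, floored at
	 * 4 MB.  Illustrative example (assuming rate_bps is in bytes/s and
	 * interval in microseconds): 12,500,000 * 100,000 / (USEC_PER_SEC / 4)
	 * = 5,000,000 bytes, i.e. ~5 MB for 100 Mbit/s at the default 100 ms
	 * interval.
	 */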
1940 if (q->buffer_config_limit) {
1941 q->buffer_limit = q->buffer_config_limit;
1942 } else if (q->rate_bps) {
1943 u64 t = q->rate_bps * q->interval;
1944
1945 do_div(t, USEC_PER_SEC / 4);
1946 q->buffer_limit = max_t(u32, t, 4U << 20);
1947 } else {
1948 q->buffer_limit = ~0;
1949 }
1950
1951 sch->flags &= ~TCQ_F_CAN_BYPASS;
1952
1953 q->buffer_limit = min(q->buffer_limit,
1954 max(sch->limit * psched_mtu(qdisc_dev(sch)),
1955 q->buffer_config_limit));
1956}
1957
1958static int cake_change(struct Qdisc *sch, struct nlattr *opt,
1959 struct netlink_ext_ack *extack)
1960{
1961 struct cake_sched_data *q = qdisc_priv(sch);
1962 struct nlattr *tb[TCA_CAKE_MAX + 1];
1963 int err;
1964
1965 if (!opt)
1966 return -EINVAL;
1967
1968 err = nla_parse_nested(tb, TCA_CAKE_MAX, opt, cake_policy, extack);
1969 if (err < 0)
1970 return err;
1971
1972	if (tb[TCA_CAKE_NAT]) {
1973#if IS_ENABLED(CONFIG_NF_CONNTRACK)
1974 q->flow_mode &= ~CAKE_FLOW_NAT_FLAG;
1975 q->flow_mode |= CAKE_FLOW_NAT_FLAG *
1976 !!nla_get_u32(tb[TCA_CAKE_NAT]);
1977#else
1978 NL_SET_ERR_MSG_ATTR(extack, tb[TCA_CAKE_NAT],
1979 "No conntrack support in kernel");
1980 return -EOPNOTSUPP;
1981#endif
1982 }
1983
1984	if (tb[TCA_CAKE_BASE_RATE64])
1985 q->rate_bps = nla_get_u64(tb[TCA_CAKE_BASE_RATE64]);
1986
1987 if (tb[TCA_CAKE_FLOW_MODE])
1988		q->flow_mode = ((q->flow_mode & CAKE_FLOW_NAT_FLAG) |
1989 (nla_get_u32(tb[TCA_CAKE_FLOW_MODE]) &
1990 CAKE_FLOW_MASK));
1991
1992 if (tb[TCA_CAKE_RTT]) {
1993 q->interval = nla_get_u32(tb[TCA_CAKE_RTT]);
1994
1995 if (!q->interval)
1996 q->interval = 1;
1997 }
1998
1999 if (tb[TCA_CAKE_TARGET]) {
2000 q->target = nla_get_u32(tb[TCA_CAKE_TARGET]);
2001
2002 if (!q->target)
2003 q->target = 1;
2004 }
2005
2006	if (tb[TCA_CAKE_AUTORATE]) {
2007 if (!!nla_get_u32(tb[TCA_CAKE_AUTORATE]))
2008 q->rate_flags |= CAKE_FLAG_AUTORATE_INGRESS;
2009 else
2010 q->rate_flags &= ~CAKE_FLAG_AUTORATE_INGRESS;
2011 }
2012
2013 if (tb[TCA_CAKE_INGRESS]) {
2014 if (!!nla_get_u32(tb[TCA_CAKE_INGRESS]))
2015 q->rate_flags |= CAKE_FLAG_INGRESS;
2016 else
2017 q->rate_flags &= ~CAKE_FLAG_INGRESS;
2018 }
2019
2020	if (tb[TCA_CAKE_ACK_FILTER])
2021 q->ack_filter = nla_get_u32(tb[TCA_CAKE_ACK_FILTER]);
2022
2023	if (tb[TCA_CAKE_MEMORY])
2024 q->buffer_config_limit = nla_get_u32(tb[TCA_CAKE_MEMORY]);
2025
2026 if (q->tins) {
2027 sch_tree_lock(sch);
2028 cake_reconfigure(sch);
2029 sch_tree_unlock(sch);
2030 }
2031
2032 return 0;
2033}
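
/* For reference, a configuration exercising most of the attributes parsed
 * above could look like this (illustrative only; the exact option names
 * depend on the iproute2 cake front end paired with this qdisc):
 *
 *   tc qdisc replace dev eth0 root cake bandwidth 100Mbit rtt 100ms \
 *      nat ingress memlimit 4mb
 *
 * which maps to TCA_CAKE_BASE_RATE64, TCA_CAKE_RTT, TCA_CAKE_NAT,
 * TCA_CAKE_INGRESS and TCA_CAKE_MEMORY respectively.
 */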
2034
2035static void cake_destroy(struct Qdisc *sch)
2036{
2037 struct cake_sched_data *q = qdisc_priv(sch);
2038
2039 qdisc_watchdog_cancel(&q->watchdog);
2040 tcf_block_put(q->block);
2041 kvfree(q->tins);
2042}
2043
2044static int cake_init(struct Qdisc *sch, struct nlattr *opt,
2045 struct netlink_ext_ack *extack)
2046{
2047 struct cake_sched_data *q = qdisc_priv(sch);
2048 int i, j, err;
2049
2050 sch->limit = 10240;
2051 q->tin_mode = CAKE_DIFFSERV_BESTEFFORT;
2052 q->flow_mode = CAKE_FLOW_TRIPLE;
2053
2054 q->rate_bps = 0; /* unlimited by default */
2055
2056 q->interval = 100000; /* 100ms default */
2057 q->target = 5000; /* 5ms: codel RFC argues
2058 * for 5 to 10% of interval
2059 */
2060
2061 q->cur_tin = 0;
2062 q->cur_flow = 0;
2063
2064 qdisc_watchdog_init(&q->watchdog, sch);
2065
2066 if (opt) {
2067		err = cake_change(sch, opt, extack);
2068
2069 if (err)
2070 return err;
2071 }
2072
2073 err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
2074 if (err)
2075 return err;
2076
2077 quantum_div[0] = ~0;
2078 for (i = 1; i <= CAKE_QUEUES; i++)
2079 quantum_div[i] = 65535 / i;
2080
2081 q->tins = kvzalloc(CAKE_MAX_TINS * sizeof(struct cake_tin_data),
2082 GFP_KERNEL);
2083 if (!q->tins)
2084 goto nomem;
2085
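	/* Lay out per-tin state: each of the CAKE_MAX_TINS tins owns
	 * CAKE_QUEUES flow queues.  The shared overflow heap is indexed as
	 * k = j * CAKE_MAX_TINS + i, interleaving the tins, and
	 * overflow_idx[] remembers each flow's current slot in that heap.
	 */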
2086 for (i = 0; i < CAKE_MAX_TINS; i++) {
2087 struct cake_tin_data *b = q->tins + i;
2088
2089 INIT_LIST_HEAD(&b->new_flows);
2090 INIT_LIST_HEAD(&b->old_flows);
2091 INIT_LIST_HEAD(&b->decaying_flows);
2092 b->sparse_flow_count = 0;
2093 b->bulk_flow_count = 0;
2094 b->decaying_flow_count = 0;
2095
2096 for (j = 0; j < CAKE_QUEUES; j++) {
2097 struct cake_flow *flow = b->flows + j;
2098 u32 k = j * CAKE_MAX_TINS + i;
2099
2100 INIT_LIST_HEAD(&flow->flowchain);
2101 cobalt_vars_init(&flow->cvars);
2102
2103 q->overflow_heap[k].t = i;
2104 q->overflow_heap[k].b = j;
2105 b->overflow_idx[j] = k;
2106 }
2107 }
2108
2109 cake_reconfigure(sch);
2110 q->avg_peak_bandwidth = q->rate_bps;
2111 q->min_netlen = ~0;
2112 q->min_adjlen = ~0;
2113 return 0;
2114
2115nomem:
2116 cake_destroy(sch);
2117 return -ENOMEM;
2118}
2119
2120static int cake_dump(struct Qdisc *sch, struct sk_buff *skb)
2121{
2122 struct cake_sched_data *q = qdisc_priv(sch);
2123 struct nlattr *opts;
2124
2125 opts = nla_nest_start(skb, TCA_OPTIONS);
2126 if (!opts)
2127 goto nla_put_failure;
2128
2129 if (nla_put_u64_64bit(skb, TCA_CAKE_BASE_RATE64, q->rate_bps,
2130 TCA_CAKE_PAD))
2131 goto nla_put_failure;
2132
2133 if (nla_put_u32(skb, TCA_CAKE_FLOW_MODE,
2134 q->flow_mode & CAKE_FLOW_MASK))
2135 goto nla_put_failure;
2136
2137 if (nla_put_u32(skb, TCA_CAKE_RTT, q->interval))
2138 goto nla_put_failure;
2139
2140 if (nla_put_u32(skb, TCA_CAKE_TARGET, q->target))
2141 goto nla_put_failure;
2142
2143 if (nla_put_u32(skb, TCA_CAKE_MEMORY, q->buffer_config_limit))
2144 goto nla_put_failure;
2145
2146	if (nla_put_u32(skb, TCA_CAKE_AUTORATE,
2147 !!(q->rate_flags & CAKE_FLAG_AUTORATE_INGRESS)))
2148 goto nla_put_failure;
2149
2150 if (nla_put_u32(skb, TCA_CAKE_INGRESS,
2151 !!(q->rate_flags & CAKE_FLAG_INGRESS)))
2152 goto nla_put_failure;
2153
2154	if (nla_put_u32(skb, TCA_CAKE_ACK_FILTER, q->ack_filter))
2155 goto nla_put_failure;
2156
2157	if (nla_put_u32(skb, TCA_CAKE_NAT,
2158 !!(q->flow_mode & CAKE_FLOW_NAT_FLAG)))
2159 goto nla_put_failure;
2160
2161	return nla_nest_end(skb, opts);
2162
2163nla_put_failure:
2164 return -1;
2165}
2166
2167static int cake_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
2168{
2169 struct nlattr *stats = nla_nest_start(d->skb, TCA_STATS_APP);
2170 struct cake_sched_data *q = qdisc_priv(sch);
2171 struct nlattr *tstats, *ts;
2172 int i;
2173
2174 if (!stats)
2175 return -1;
2176
2177#define PUT_STAT_U32(attr, data) do { \
2178 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2179 goto nla_put_failure; \
2180 } while (0)
2181#define PUT_STAT_U64(attr, data) do { \
2182 if (nla_put_u64_64bit(d->skb, TCA_CAKE_STATS_ ## attr, \
2183 data, TCA_CAKE_STATS_PAD)) \
2184 goto nla_put_failure; \
2185 } while (0)
2186
2187 PUT_STAT_U64(CAPACITY_ESTIMATE64, q->avg_peak_bandwidth);
2188 PUT_STAT_U32(MEMORY_LIMIT, q->buffer_limit);
2189 PUT_STAT_U32(MEMORY_USED, q->buffer_max_used);
2190 PUT_STAT_U32(AVG_NETOFF, ((q->avg_netoff + 0x8000) >> 16));
2191 PUT_STAT_U32(MAX_NETLEN, q->max_netlen);
2192 PUT_STAT_U32(MAX_ADJLEN, q->max_adjlen);
2193 PUT_STAT_U32(MIN_NETLEN, q->min_netlen);
2194 PUT_STAT_U32(MIN_ADJLEN, q->min_adjlen);
2195
2196#undef PUT_STAT_U32
2197#undef PUT_STAT_U64
2198
2199 tstats = nla_nest_start(d->skb, TCA_CAKE_STATS_TIN_STATS);
2200 if (!tstats)
2201 goto nla_put_failure;
2202
2203#define PUT_TSTAT_U32(attr, data) do { \
2204 if (nla_put_u32(d->skb, TCA_CAKE_TIN_STATS_ ## attr, data)) \
2205 goto nla_put_failure; \
2206 } while (0)
2207#define PUT_TSTAT_U64(attr, data) do { \
2208 if (nla_put_u64_64bit(d->skb, TCA_CAKE_TIN_STATS_ ## attr, \
2209 data, TCA_CAKE_TIN_STATS_PAD)) \
2210 goto nla_put_failure; \
2211 } while (0)
2212
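	/* One nested attribute block per active tin, indexed from 1 and
	 * written with the PUT_TSTAT_* helpers above; userspace walks
	 * TCA_CAKE_STATS_TIN_STATS to recover the per-tin figures.
	 */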
2213 for (i = 0; i < q->tin_cnt; i++) {
2214 struct cake_tin_data *b = &q->tins[i];
2215
2216 ts = nla_nest_start(d->skb, i + 1);
2217 if (!ts)
2218 goto nla_put_failure;
2219
2220 PUT_TSTAT_U64(THRESHOLD_RATE64, b->tin_rate_bps);
2221 PUT_TSTAT_U64(SENT_BYTES64, b->bytes);
2222 PUT_TSTAT_U32(BACKLOG_BYTES, b->tin_backlog);
2223
2224 PUT_TSTAT_U32(TARGET_US,
2225 ktime_to_us(ns_to_ktime(b->cparams.target)));
2226 PUT_TSTAT_U32(INTERVAL_US,
2227 ktime_to_us(ns_to_ktime(b->cparams.interval)));
2228
2229 PUT_TSTAT_U32(SENT_PACKETS, b->packets);
2230 PUT_TSTAT_U32(DROPPED_PACKETS, b->tin_dropped);
2231 PUT_TSTAT_U32(ECN_MARKED_PACKETS, b->tin_ecn_mark);
2232 PUT_TSTAT_U32(ACKS_DROPPED_PACKETS, b->ack_drops);
2233
2234 PUT_TSTAT_U32(PEAK_DELAY_US,
2235 ktime_to_us(ns_to_ktime(b->peak_delay)));
2236 PUT_TSTAT_U32(AVG_DELAY_US,
2237 ktime_to_us(ns_to_ktime(b->avge_delay)));
2238 PUT_TSTAT_U32(BASE_DELAY_US,
2239 ktime_to_us(ns_to_ktime(b->base_delay)));
2240
2241 PUT_TSTAT_U32(WAY_INDIRECT_HITS, b->way_hits);
2242 PUT_TSTAT_U32(WAY_MISSES, b->way_misses);
2243 PUT_TSTAT_U32(WAY_COLLISIONS, b->way_collisions);
2244
2245 PUT_TSTAT_U32(SPARSE_FLOWS, b->sparse_flow_count +
2246 b->decaying_flow_count);
2247 PUT_TSTAT_U32(BULK_FLOWS, b->bulk_flow_count);
2248 PUT_TSTAT_U32(UNRESPONSIVE_FLOWS, b->unresponsive_flow_count);
2249 PUT_TSTAT_U32(MAX_SKBLEN, b->max_skblen);
2250
2251 PUT_TSTAT_U32(FLOW_QUANTUM, b->flow_quantum);
2252 nla_nest_end(d->skb, ts);
2253 }
2254
2255#undef PUT_TSTAT_U32
2256#undef PUT_TSTAT_U64
2257
2258 nla_nest_end(d->skb, tstats);
2259 return nla_nest_end(d->skb, stats);
2260
2261nla_put_failure:
2262 nla_nest_cancel(d->skb, stats);
2263 return -1;
2264}
2265
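/* CAKE has no configurable classes.  The class ops below only expose each
 * (tin, flow) pair as a read-only pseudo-class so that per-flow queue
 * statistics can be inspected via cake_dump_class_stats().
 */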
2266static struct Qdisc *cake_leaf(struct Qdisc *sch, unsigned long arg)
2267{
2268 return NULL;
2269}
2270
2271static unsigned long cake_find(struct Qdisc *sch, u32 classid)
2272{
2273 return 0;
2274}
2275
2276static unsigned long cake_bind(struct Qdisc *sch, unsigned long parent,
2277 u32 classid)
2278{
2279 return 0;
2280}
2281
2282static void cake_unbind(struct Qdisc *q, unsigned long cl)
2283{
2284}
2285
2286static struct tcf_block *cake_tcf_block(struct Qdisc *sch, unsigned long cl,
2287 struct netlink_ext_ack *extack)
2288{
2289 struct cake_sched_data *q = qdisc_priv(sch);
2290
2291 if (cl)
2292 return NULL;
2293 return q->block;
2294}
2295
2296static int cake_dump_class(struct Qdisc *sch, unsigned long cl,
2297 struct sk_buff *skb, struct tcmsg *tcm)
2298{
2299 tcm->tcm_handle |= TC_H_MIN(cl);
2300 return 0;
2301}
2302
2303static int cake_dump_class_stats(struct Qdisc *sch, unsigned long cl,
2304 struct gnet_dump *d)
2305{
2306 struct cake_sched_data *q = qdisc_priv(sch);
2307 const struct cake_flow *flow = NULL;
2308 struct gnet_stats_queue qs = { 0 };
2309 struct nlattr *stats;
2310 u32 idx = cl - 1;
2311
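	/* Class ids handed out by cake_walk() are 1-based:
	 * tin  = (cl - 1) / CAKE_QUEUES, flow = (cl - 1) % CAKE_QUEUES.
	 */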
2312 if (idx < CAKE_QUEUES * q->tin_cnt) {
2313 const struct cake_tin_data *b = &q->tins[idx / CAKE_QUEUES];
2314 const struct sk_buff *skb;
2315
2316 flow = &b->flows[idx % CAKE_QUEUES];
2317
2318 if (flow->head) {
2319 sch_tree_lock(sch);
2320 skb = flow->head;
2321 while (skb) {
2322 qs.qlen++;
2323 skb = skb->next;
2324 }
2325 sch_tree_unlock(sch);
2326 }
2327 qs.backlog = b->backlogs[idx % CAKE_QUEUES];
2328 qs.drops = flow->dropped;
2329 }
2330 if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
2331 return -1;
2332 if (flow) {
2333 ktime_t now = ktime_get();
2334
2335 stats = nla_nest_start(d->skb, TCA_STATS_APP);
2336 if (!stats)
2337 return -1;
2338
2339#define PUT_STAT_U32(attr, data) do { \
2340 if (nla_put_u32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2341 goto nla_put_failure; \
2342 } while (0)
2343#define PUT_STAT_S32(attr, data) do { \
2344 if (nla_put_s32(d->skb, TCA_CAKE_STATS_ ## attr, data)) \
2345 goto nla_put_failure; \
2346 } while (0)
2347
2348 PUT_STAT_S32(DEFICIT, flow->deficit);
2349 PUT_STAT_U32(DROPPING, flow->cvars.dropping);
2350 PUT_STAT_U32(COBALT_COUNT, flow->cvars.count);
2351 PUT_STAT_U32(P_DROP, flow->cvars.p_drop);
2352 if (flow->cvars.p_drop) {
2353 PUT_STAT_S32(BLUE_TIMER_US,
2354 ktime_to_us(
2355 ktime_sub(now,
2356 flow->cvars.blue_timer)));
2357 }
2358 if (flow->cvars.dropping) {
2359 PUT_STAT_S32(DROP_NEXT_US,
2360 ktime_to_us(
2361 ktime_sub(now,
2362 flow->cvars.drop_next)));
2363 }
2364
2365 if (nla_nest_end(d->skb, stats) < 0)
2366 return -1;
2367 }
2368
2369 return 0;
2370
2371nla_put_failure:
2372 nla_nest_cancel(d->skb, stats);
2373 return -1;
2374}
2375
2376static void cake_walk(struct Qdisc *sch, struct qdisc_walker *arg)
2377{
2378 struct cake_sched_data *q = qdisc_priv(sch);
2379 unsigned int i, j;
2380
2381 if (arg->stop)
2382 return;
2383
2384 for (i = 0; i < q->tin_cnt; i++) {
2385 struct cake_tin_data *b = &q->tins[i];
2386
2387 for (j = 0; j < CAKE_QUEUES; j++) {
2388 if (list_empty(&b->flows[j].flowchain) ||
2389 arg->count < arg->skip) {
2390 arg->count++;
2391 continue;
2392 }
2393 if (arg->fn(sch, i * CAKE_QUEUES + j + 1, arg) < 0) {
2394 arg->stop = 1;
2395 break;
2396 }
2397 arg->count++;
2398 }
2399 }
2400}
2401
2402static const struct Qdisc_class_ops cake_class_ops = {
2403 .leaf = cake_leaf,
2404 .find = cake_find,
2405 .tcf_block = cake_tcf_block,
2406 .bind_tcf = cake_bind,
2407 .unbind_tcf = cake_unbind,
2408 .dump = cake_dump_class,
2409 .dump_stats = cake_dump_class_stats,
2410 .walk = cake_walk,
2411};
2412
2413static struct Qdisc_ops cake_qdisc_ops __read_mostly = {
2414 .cl_ops = &cake_class_ops,
2415 .id = "cake",
2416 .priv_size = sizeof(struct cake_sched_data),
2417 .enqueue = cake_enqueue,
2418 .dequeue = cake_dequeue,
2419 .peek = qdisc_peek_dequeued,
2420 .init = cake_init,
2421 .reset = cake_reset,
2422 .destroy = cake_destroy,
2423 .change = cake_change,
2424 .dump = cake_dump,
2425 .dump_stats = cake_dump_stats,
2426 .owner = THIS_MODULE,
2427};
2428
2429static int __init cake_module_init(void)
2430{
2431 return register_qdisc(&cake_qdisc_ops);
2432}
2433
2434static void __exit cake_module_exit(void)
2435{
2436 unregister_qdisc(&cake_qdisc_ops);
2437}
2438
2439module_init(cake_module_init)
2440module_exit(cake_module_exit)
2441MODULE_AUTHOR("Jonathan Morton");
2442MODULE_LICENSE("Dual BSD/GPL");
2443MODULE_DESCRIPTION("The CAKE shaper.");