// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * Sample Host Bandwidth Manager (HBM) BPF program.
 *
 * A cgroup skb BPF egress program to limit cgroup output bandwidth.
 * It uses a modified virtual token bucket queue to limit average
 * egress bandwidth. The implementation uses credits instead of tokens.
 * Negative credits imply that queueing would have happened (this is
 * a virtual queue, so no queueing is done by it; however, queueing may
 * occur at the actual qdisc, which is not used for rate limiting).
 *
 * This implementation uses 3 thresholds, one to start marking packets and
 * the other two to drop packets:
 *                                    CREDIT
 *        - <--------------------------|------------------------> +
 *             |        |        |     0
 *             |    Large pkt    |
 *             |   drop thresh   |
 *      Small pkt drop    Mark threshold
 *          thresh
 *
 * The effect of marking depends on the type of packet:
 * a) If the packet is ECN enabled and it is a TCP packet, then the packet
 *    is ECN marked.
 * b) If the packet is a TCP packet, then we probabilistically call tcp_cwr
 *    to reduce the congestion window. The current implementation uses a
 *    linear distribution (0% probability at marking threshold, 100%
 *    probability at drop threshold).
 * c) If the packet is not a TCP packet, then it is dropped.
 *
 * If the credit is below the drop threshold, the packet is dropped. If it
 * is a TCP packet, then it also calls tcp_cwr since packets dropped by
 * a cgroup skb BPF program do not automatically trigger a call to
 * tcp_cwr in the current kernel code.
 *
 * This BPF program actually uses 2 drop thresholds, one threshold
 * for larger packets (>= 120 bytes) and another for smaller packets. This
 * protects smaller packets such as SYNs, ACKs, etc.
 *
 * The default bandwidth limit is set at 1 Gbps, but this can be changed by
 * a user program through a shared BPF map. In addition, by default this BPF
 * program does not limit connections using loopback. This behavior can be
 * overridden by the user program. There is also an option to calculate
 * some statistics, such as the percentage of packets marked or dropped,
 * which a user program, such as hbm, can access.
 */
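/*
 * Worked example (for orientation only): at the default rate of about
 * 1 Gbps, a 1500 byte packet takes 1500 * 8 / 10^9 s = 12 us to send,
 * so each such packet advances the virtual queue's send time
 * (qdp->lasttime) by roughly 12000 ns. Once the send time runs ahead of
 * the current time by more than MARK_THRESH_NS the program starts
 * marking, and beyond the drop thresholds it starts dropping.
 *
 * Sketch of how a user program might change the limit through the
 * shared queue_stats map with libbpf. This is illustrative only:
 * map_fd and new_rate are placeholders, and the exact layout and units
 * of struct hbm_queue_stats come from the shared hbm headers, not from
 * this file:
 *
 *	struct hbm_queue_stats qs = {};
 *	__u32 key = 0;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, &qs)) {
 *		qs.rate = new_rate;	// new limit, in the map's rate units
 *		bpf_map_update_elem(map_fd, &key, &qs, BPF_ANY);
 *	}
 */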

#include "hbm_kern.h"

SEC("cgroup_skb/egress")
int _hbm_out_cg(struct __sk_buff *skb)
{
	long long delta = 0, delta_send;
	unsigned long long curtime, sendtime;
	struct hbm_queue_stats *qsp = NULL;
	unsigned int queue_index = 0;
	bool congestion_flag = false;
	bool ecn_ce_flag = false;
	struct hbm_pkt_info pkti = {};
	struct hbm_vqueue *qdp;
	bool drop_flag = false;
	bool cwr_flag = false;
	int len = skb->len;
	int rv = ALLOW_PKT;

	qsp = bpf_map_lookup_elem(&queue_stats, &queue_index);

	// Check if we should ignore loopback traffic
	if (qsp != NULL && !qsp->loopback && (skb->ifindex == 1))
		return ALLOW_PKT;

	hbm_get_pkt_info(skb, &pkti);

	// We may want to account for the length of headers in len
	// calculation, like ETH header + overhead, especially if it
	// is a GSO packet. But I am not doing it right now.

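	// Per-cgroup virtual queue state lives in cgroup local storage and
	// is initialized on the first packet seen. The 1024 passed below is
	// presumably the default rate (~1 Gbps per the header comment);
	// hbm_init_edt_vqueue() comes from hbm_kern.h.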
	qdp = bpf_get_local_storage(&queue_state, 0);
	if (!qdp)
		return ALLOW_PKT;
	if (qdp->lasttime == 0)
		hbm_init_edt_vqueue(qdp, 1024);

	curtime = bpf_ktime_get_ns();

	// Begin critical section
	bpf_spin_lock(&qdp->lock);
	delta = qdp->lasttime - curtime;
	// bound bursts to 100us
	if (delta < -BURST_SIZE_NS) {
		// negative delta is a credit that allows bursts
		qdp->lasttime = curtime - BURST_SIZE_NS;
		delta = -BURST_SIZE_NS;
	}
	sendtime = qdp->lasttime;
	delta_send = BYTES_TO_NS(len, qdp->rate);
	__sync_add_and_fetch(&(qdp->lasttime), delta_send);
	bpf_spin_unlock(&qdp->lock);
	// End critical section

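	// Earliest Departure Time (EDT): storing the virtual queue's send
	// time in skb->tstamp lets a downstream qdisc that honors EDT
	// (such as fq) delay the packet until its departure time.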
	// Set EDT of packet
	skb->tstamp = sendtime;

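	// The map's rate value appears to be in Mbps: assuming BYTES_TO_NS()
	// is the shift-based macro from hbm_kern.h, bytes * 2^20 divided by
	// (Mbps * 128) gives bytes * 8192 / Mbps, a close approximation of
	// the exact 8000 ns per byte per Mbps.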
	// Check if we should update rate
	if (qsp != NULL && (qsp->rate * 128) != qdp->rate)
		qdp->rate = qsp->rate * 128;

	// Set flags (drop, congestion, cwr)
	// last packet will be sent in the future, bound latency
	if (delta > DROP_THRESH_NS || (delta > LARGE_PKT_DROP_THRESH_NS &&
				       len > LARGE_PKT_THRESH)) {
		drop_flag = true;
		if (pkti.is_tcp && pkti.ecn == 0)
			cwr_flag = true;
	} else if (delta > MARK_THRESH_NS) {
		if (pkti.is_tcp)
			congestion_flag = true;
		else
			drop_flag = true;
	}

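	// Congestion handling: prefer ECN marking; if the packet cannot be
	// CE-marked, fall back to a probabilistic tcp_cwr call whose
	// probability grows linearly from 0 at MARK_THRESH_NS to 1 at
	// MARK_THRESH_NS + MARK_REGION_SIZE_NS (see item b in the header).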
	if (congestion_flag) {
		if (bpf_skb_ecn_set_ce(skb)) {
			ecn_ce_flag = true;
		} else {
			if (pkti.is_tcp) {
				unsigned int rand = bpf_get_prandom_u32();

				if (delta >= MARK_THRESH_NS +
				    (rand % MARK_REGION_SIZE_NS)) {
					// Do congestion control
					cwr_flag = true;
				}
			} else if (len > LARGE_PKT_THRESH) {
				// Problem if too many small packets?
				drop_flag = true;
				congestion_flag = false;
			}
		}
	}

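	// With at most one packet in flight, a drop would likely force the
	// flow to recover through a retransmission timeout; presumably for
	// that reason the packet is allowed and cwr is signaled instead.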
	if (pkti.is_tcp && drop_flag && pkti.packets_out <= 1) {
		drop_flag = false;
		cwr_flag = true;
		congestion_flag = false;
	}

	if (qsp != NULL && qsp->no_cn)
		cwr_flag = false;

	hbm_update_stats(qsp, len, curtime, congestion_flag, drop_flag,
			 cwr_flag, ecn_ce_flag, &pkti, (int) delta);

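	// A dropped packet does not consume link bandwidth, so return its
	// credit by rewinding the virtual queue's send time.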
	if (drop_flag) {
		__sync_add_and_fetch(&(qdp->lasttime), -delta_send);
		rv = DROP_PKT;
	}

	if (cwr_flag)
		rv |= CWR;
	return rv;
}

char _license[] SEC("license") = "GPL";