// SPDX-License-Identifier: GPL-2.0-only
/*
 * TCP CUBIC: Binary Increase Congestion control for TCP v2.3
 * Home page:
 *      http://netsrv.csc.ncsu.edu/twiki/bin/view/Main/BIC
 * This is from the implementation of CUBIC TCP in
 * Sangtae Ha, Injong Rhee and Lisong Xu,
 *  "CUBIC: A New TCP-Friendly High-Speed TCP Variant"
 *  in ACM SIGOPS Operating System Review, July 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf
 *
 * CUBIC integrates a new slow start algorithm, called HyStart.
 * The details of HyStart are presented in
 *  Sangtae Ha and Injong Rhee,
 *  "Taming the Elephants: New TCP Slow Start", NCSU TechReport 2008.
 * Available from:
 *  http://netsrv.csc.ncsu.edu/export/hystart_techreport_2008.pdf
 *
 * All testing results are available from:
 * http://netsrv.csc.ncsu.edu/wiki/index.php/TCP_Testing
 *
 * Unless CUBIC is enabled and the congestion window is large,
 * this behaves the same as the original Reno.
 */

#include <linux/mm.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/module.h>
#include <linux/math64.h>
#include <net/tcp.h>

#define BICTCP_BETA_SCALE    1024	/* Scale factor beta calculation
					 * max_cwnd = snd_cwnd * beta
					 */
#define BICTCP_HZ		10	/* BIC HZ 2^10 = 1024 */

/* Two methods of hybrid slow start */
#define HYSTART_ACK_TRAIN	0x1
#define HYSTART_DELAY		0x2

/* Number of delay samples for detecting the increase of delay */
#define HYSTART_MIN_SAMPLES	8
#define HYSTART_DELAY_MIN	(4000U)		/* 4 ms */
#define HYSTART_DELAY_MAX	(16000U)	/* 16 ms */
#define HYSTART_DELAY_THRESH(x)	clamp(x, HYSTART_DELAY_MIN, HYSTART_DELAY_MAX)
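
/*
 * HyStart exits slow start through one of two heuristics (see
 * hystart_update() below): the ACK-train check fires when a run of
 * closely spaced ACKs (gaps within hystart_ack_delta_us) has stretched
 * past a threshold derived from the minimum RTT (roughly half of it when
 * pacing is off), and the delay check fires when the per-round minimum
 * RTT exceeds the connection's delay_min by delay_min/8, clamped to the
 * 4-16 ms window above.
 */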

static int fast_convergence __read_mostly = 1;
static int beta __read_mostly = 717;	/* = 717/1024 (BICTCP_BETA_SCALE) */
static int initial_ssthresh __read_mostly;
static int bic_scale __read_mostly = 41;
static int tcp_friendliness __read_mostly = 1;

static int hystart __read_mostly = 1;
static int hystart_detect __read_mostly = HYSTART_ACK_TRAIN | HYSTART_DELAY;
static int hystart_low_window __read_mostly = 16;
static int hystart_ack_delta_us __read_mostly = 2000;

static u32 cube_rtt_scale __read_mostly;
static u32 beta_scale __read_mostly;
static u64 cube_factor __read_mostly;

/* Note parameters that are used for precomputing scale factors are read-only */
module_param(fast_convergence, int, 0644);
MODULE_PARM_DESC(fast_convergence, "turn on/off fast convergence");
module_param(beta, int, 0644);
MODULE_PARM_DESC(beta, "beta for multiplicative increase");
module_param(initial_ssthresh, int, 0644);
MODULE_PARM_DESC(initial_ssthresh, "initial value of slow start threshold");
module_param(bic_scale, int, 0444);
MODULE_PARM_DESC(bic_scale, "scale (scaled by 1024) value for bic function (bic_scale/1024)");
module_param(tcp_friendliness, int, 0644);
MODULE_PARM_DESC(tcp_friendliness, "turn on/off tcp friendliness");
module_param(hystart, int, 0644);
MODULE_PARM_DESC(hystart, "turn on/off hybrid slow start algorithm");
module_param(hystart_detect, int, 0644);
MODULE_PARM_DESC(hystart_detect, "hybrid slow start detection mechanisms"
		 " 1: packet-train 2: delay 3: both packet-train and delay");
module_param(hystart_low_window, int, 0644);
MODULE_PARM_DESC(hystart_low_window, "lower bound cwnd for hybrid slow start");
module_param(hystart_ack_delta_us, int, 0644);
MODULE_PARM_DESC(hystart_ack_delta_us, "spacing between ack's indicating train (usecs)");
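
/*
 * Note: with the module_param() permissions above, these knobs are
 * normally exposed for runtime tuning under
 * /sys/module/tcp_cubic/parameters/; bic_scale is read-only (0444)
 * because it feeds the precomputed scale factors set up at register time.
 */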

/* BIC TCP Parameters */
struct bictcp {
	u32	cnt;		/* increase cwnd by 1 after ACKs */
	u32	last_max_cwnd;	/* last maximum snd_cwnd */
	u32	last_cwnd;	/* the last snd_cwnd */
	u32	last_time;	/* time when updated last_cwnd */
	u32	bic_origin_point;/* origin point of bic function */
	u32	bic_K;		/* time to origin point
				   from the beginning of the current epoch */
	u32	delay_min;	/* min delay (usec) */
	u32	epoch_start;	/* beginning of an epoch */
	u32	ack_cnt;	/* number of acks */
	u32	tcp_cwnd;	/* estimated tcp cwnd */
	u16	unused;
	u8	sample_cnt;	/* number of samples to decide curr_rtt */
	u8	found;		/* the exit point is found? */
	u32	round_start;	/* beginning of each round */
	u32	end_seq;	/* end_seq of the round */
	u32	last_ack;	/* last time when the ACK spacing is close */
	u32	curr_rtt;	/* the minimum rtt of current round */
};
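
/*
 * struct bictcp lives in the icsk_ca_priv scratch area of the connection;
 * cubictcp_register() below has a BUILD_BUG_ON() ensuring it fits within
 * ICSK_CA_PRIV_SIZE, so new fields must stay inside that budget.
 */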

static inline void bictcp_reset(struct bictcp *ca)
{
	memset(ca, 0, offsetof(struct bictcp, unused));
	ca->found = 0;
}

static inline u32 bictcp_clock_us(const struct sock *sk)
{
	return tcp_sk(sk)->tcp_mstamp;
}
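
/*
 * tcp_mstamp is a 64-bit microsecond clock; truncating it to u32 here is
 * fine because the HyStart checks work on 32-bit differences between
 * nearby samples, which tolerate the wrap every ~2^32 us (about 71 min).
 */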

static inline void bictcp_hystart_reset(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->round_start = ca->last_ack = bictcp_clock_us(sk);
	ca->end_seq = tp->snd_nxt;
	ca->curr_rtt = ~0U;
	ca->sample_cnt = 0;
}

static void cubictcp_init(struct sock *sk)
{
	struct bictcp *ca = inet_csk_ca(sk);

	bictcp_reset(ca);

	if (hystart)
		bictcp_hystart_reset(sk);

	if (!hystart && initial_ssthresh)
		tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
}

static void cubictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
{
	if (event == CA_EVENT_TX_START) {
		struct bictcp *ca = inet_csk_ca(sk);
		u32 now = tcp_jiffies32;
		s32 delta;

		delta = now - tcp_sk(sk)->lsndtime;

		/* We were application limited (idle) for a while.
		 * Shift epoch_start to keep cwnd growth to cubic curve.
		 */
		if (ca->epoch_start && delta > 0) {
			ca->epoch_start += delta;
			if (after(ca->epoch_start, now))
				ca->epoch_start = now;
		}
		return;
	}
}

/* calculate the cubic root of x using a table lookup followed by one
 * Newton-Raphson iteration.
 * Avg err ~= 0.195%
 */
static u32 cubic_root(u64 a)
{
	u32 x, b, shift;
	/*
	 * cbrt(x) MSB values for x MSB values in [0..63].
	 * Precomputed then refined by hand - Willy Tarreau
	 *
	 * For x in [0..63],
	 *   v = cbrt(x << 18) - 1
	 *   cbrt(x) = (v[x] + 10) >> 6
	 */
	static const u8 v[] = {
		/* 0x00 */    0,   54,   54,   54,  118,  118,  118,  118,
		/* 0x08 */  123,  129,  134,  138,  143,  147,  151,  156,
		/* 0x10 */  157,  161,  164,  168,  170,  173,  176,  179,
		/* 0x18 */  181,  185,  187,  190,  192,  194,  197,  199,
		/* 0x20 */  200,  202,  204,  206,  209,  211,  213,  215,
		/* 0x28 */  217,  219,  221,  222,  224,  225,  227,  229,
		/* 0x30 */  231,  232,  234,  236,  237,  239,  240,  242,
		/* 0x38 */  244,  245,  246,  248,  250,  251,  252,  254,
	};

	b = fls64(a);
	if (b < 7) {
		/* a in [0..63] */
		return ((u32)v[(u32)a] + 35) >> 6;
	}

	b = ((b * 84) >> 8) - 1;
	shift = (a >> (b * 3));

	x = ((u32)(((u32)v[shift] + 10) << b)) >> 6;

	/*
	 * Newton-Raphson iteration
	 *                 2
	 * x    = ( 2 * x  +  a / x  ) / 3
	 *  k+1          k         k
	 */
	x = (2 * x + (u32)div64_u64(a, (u64)x * (u64)(x - 1)));
	x = ((x * 341) >> 10);
	return x;
}
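
/*
 * Worked example (illustrative): cubic_root(1000) -> fls64 = 10,
 * b = ((10 * 84) >> 8) - 1 = 2, shift = 1000 >> 6 = 15, first estimate
 * x = ((v[15] + 10) << 2) >> 6 = 10, and the Newton-Raphson step leaves
 * x = ((2 * 10 + 1000 / 90) * 341) >> 10 = 10, the exact cube root.
 */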

/*
 * Compute congestion window to use.
 */
static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
{
	u32 delta, bic_target, max_cnt;
	u64 offs, t;

	ca->ack_cnt += acked;	/* count the number of ACKed packets */

	if (ca->last_cwnd == cwnd &&
	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
		return;

	/* The CUBIC function can update ca->cnt at most once per jiffy.
	 * On all cwnd reduction events, ca->epoch_start is set to 0,
	 * which will force a recalculation of ca->cnt.
	 */
	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
		goto tcp_friendliness;

	ca->last_cwnd = cwnd;
	ca->last_time = tcp_jiffies32;

	if (ca->epoch_start == 0) {
		ca->epoch_start = tcp_jiffies32;	/* record beginning */
		ca->ack_cnt = acked;			/* start counting */
		ca->tcp_cwnd = cwnd;			/* sync with cubic */

		if (ca->last_max_cwnd <= cwnd) {
			ca->bic_K = 0;
			ca->bic_origin_point = cwnd;
		} else {
			/* Compute new K based on
			 * (wmax-cwnd) * (srtt>>3 / HZ) / c * 2^(3*bictcp_HZ)
			 */
			ca->bic_K = cubic_root(cube_factor
					       * (ca->last_max_cwnd - cwnd));
			ca->bic_origin_point = ca->last_max_cwnd;
		}
	}

	/* cubic function - calc */
	/* calculate c * time^3 / rtt,
	 *  while considering overflow in calculation of time^3
	 * (so time^3 is done by using 64 bit)
	 * and without the support of division of 64bit numbers
	 * (so all divisions are done by using 32 bit)
	 * also NOTE the unit of those variables
	 *	  time = (t - K) / 2^bictcp_HZ
	 *	  c = bic_scale >> 10
	 *	  rtt = (srtt >> 3) / HZ
	 * !!! The following code does not have overflow problems,
	 * if the cwnd < 1 million packets !!!
	 */

	t = (s32)(tcp_jiffies32 - ca->epoch_start);
	t += usecs_to_jiffies(ca->delay_min);
	/* change the unit from HZ to bictcp_HZ */
	t <<= BICTCP_HZ;
	do_div(t, HZ);

	if (t < ca->bic_K)		/* t - K */
		offs = ca->bic_K - t;
	else
		offs = t - ca->bic_K;

	/* c/rtt * (t-K)^3 */
	delta = (cube_rtt_scale * offs * offs * offs) >> (10+3*BICTCP_HZ);
	if (t < ca->bic_K)				/* below origin */
		bic_target = ca->bic_origin_point - delta;
	else						/* above origin */
		bic_target = ca->bic_origin_point + delta;

	/* cubic function - calc bictcp_cnt */
	if (bic_target > cwnd) {
		ca->cnt = cwnd / (bic_target - cwnd);
	} else {
		ca->cnt = 100 * cwnd;		/* very small increment */
	}

	/*
	 * The initial growth of cubic function may be too conservative
	 * when the available bandwidth is still unknown.
	 */
	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
		ca->cnt = 20;	/* increase cwnd 5% per RTT */

tcp_friendliness:
	/* TCP Friendly */
	if (tcp_friendliness) {
		u32 scale = beta_scale;

		delta = (cwnd * scale) >> 3;
		while (ca->ack_cnt > delta) {		/* update tcp cwnd */
			ca->ack_cnt -= delta;
			ca->tcp_cwnd++;
		}

		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
			delta = ca->tcp_cwnd - cwnd;
			max_cnt = cwnd / delta;
			if (ca->cnt > max_cnt)
				ca->cnt = max_cnt;
		}
	}

	/* The maximum rate of cwnd increase CUBIC allows is 1 packet per
	 * 2 packets ACKed, meaning cwnd grows at 1.5x per RTT.
	 */
	ca->cnt = max(ca->cnt, 2U);
}
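
/*
 * In the notation of the CUBIC paper, bictcp_update() tracks
 *	W(t) = C * (t - K)^3 + W_max
 * with W_max = ca->bic_origin_point, K = cbrt((W_max - cwnd_at_epoch) / C)
 * and an effective C of bic_scale * 10 / 1024 (~0.4 for the default
 * bic_scale of 41). Illustrative numbers: with W_max = 1000 packets and
 * the default beta, the post-loss cwnd is ~700, so K = cbrt(300 / 0.4),
 * roughly 9 seconds to climb back to W_max.
 */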

static void cubictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	if (!tcp_is_cwnd_limited(sk))
		return;

	if (tcp_in_slow_start(tp)) {
		acked = tcp_slow_start(tp, acked);
		if (!acked)
			return;
	}
	bictcp_update(ca, tp->snd_cwnd, acked);
	tcp_cong_avoid_ai(tp, ca->cnt, acked);
}

static u32 cubictcp_recalc_ssthresh(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);

	ca->epoch_start = 0;	/* end of epoch */

	/* Wmax and fast convergence */
	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
			/ (2 * BICTCP_BETA_SCALE);
	else
		ca->last_max_cwnd = tp->snd_cwnd;

	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
}
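
/*
 * With the default beta of 717/1024, a loss drops ssthresh to ~70% of
 * cwnd. If the loss happened below the previous maximum, fast convergence
 * additionally remembers only (1024 + 717) / 2048 ~ 85% of the current
 * cwnd as the new W_max, so an established flow releases bandwidth to
 * newer flows more quickly.
 */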

static void cubictcp_state(struct sock *sk, u8 new_state)
{
	if (new_state == TCP_CA_Loss) {
		bictcp_reset(inet_csk_ca(sk));
		bictcp_hystart_reset(sk);
	}
}

/* Account for TSO/GRO delays.
 * Otherwise short RTT flows could get too small ssthresh, since during
 * slow start we begin with small TSO packets and ca->delay_min would
 * not account for long aggregation delay when TSO packets get bigger.
 * Ideally even with a very small RTT we would like to have at least one
 * TSO packet being sent and received by GRO, and another one in qdisc layer.
 * We apply another 100% factor because @rate is doubled at this point.
 * We cap the cushion to 1ms.
 */
static u32 hystart_ack_delay(struct sock *sk)
{
	unsigned long rate;

	rate = READ_ONCE(sk->sk_pacing_rate);
	if (!rate)
		return 0;
	return min_t(u64, USEC_PER_MSEC,
		     div64_ul((u64)GSO_MAX_SIZE * 4 * USEC_PER_SEC, rate));
}
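
/*
 * Rough cushion sizes, assuming a 64 KB GSO_MAX_SIZE: at ~10 Gbit/s of
 * pacing rate this evaluates to ~210 usec, while at 1 Gbit/s or less it
 * saturates at the 1 ms cap.
 */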

static void hystart_update(struct sock *sk, u32 delay)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	u32 threshold;

	if (after(tp->snd_una, ca->end_seq))
		bictcp_hystart_reset(sk);

	if (hystart_detect & HYSTART_ACK_TRAIN) {
		u32 now = bictcp_clock_us(sk);

		/* first detection parameter - ack-train detection */
		if ((s32)(now - ca->last_ack) <= hystart_ack_delta_us) {
			ca->last_ack = now;

			threshold = ca->delay_min + hystart_ack_delay(sk);

			/* Hystart ack train triggers if we get ack past
			 * ca->delay_min/2.
			 * Pacing might have delayed packets up to RTT/2
			 * during slow start.
			 */
			if (sk->sk_pacing_status == SK_PACING_NONE)
				threshold >>= 1;

			if ((s32)(now - ca->round_start) > threshold) {
				ca->found = 1;
				pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
					 now - ca->round_start, threshold,
					 ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTTRAINCWND,
					      tp->snd_cwnd);
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}

	if (hystart_detect & HYSTART_DELAY) {
		/* obtain the minimum delay of more than sampling packets */
		if (ca->curr_rtt > delay)
			ca->curr_rtt = delay;
		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
			ca->sample_cnt++;
		} else {
			if (ca->curr_rtt > ca->delay_min +
			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
				ca->found = 1;
				NET_INC_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
				NET_ADD_STATS(sock_net(sk),
					      LINUX_MIB_TCPHYSTARTDELAYCWND,
					      tp->snd_cwnd);
				tp->snd_ssthresh = tp->snd_cwnd;
			}
		}
	}
}

static void cubictcp_acked(struct sock *sk, const struct ack_sample *sample)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct bictcp *ca = inet_csk_ca(sk);
	u32 delay;

	/* Some calls are for duplicates without timestamps */
	if (sample->rtt_us < 0)
		return;

	/* Discard delay samples right after fast recovery */
	if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
		return;

	delay = sample->rtt_us;
	if (delay == 0)
		delay = 1;

	/* first time call or link delay decreases */
	if (ca->delay_min == 0 || ca->delay_min > delay)
		ca->delay_min = delay;

	/* hystart triggers when cwnd is larger than some threshold */
	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
	    tp->snd_cwnd >= hystart_low_window)
		hystart_update(sk, delay);
}

static struct tcp_congestion_ops cubictcp __read_mostly = {
	.init		= cubictcp_init,
	.ssthresh	= cubictcp_recalc_ssthresh,
	.cong_avoid	= cubictcp_cong_avoid,
	.set_state	= cubictcp_state,
	.undo_cwnd	= tcp_reno_undo_cwnd,
	.cwnd_event	= cubictcp_cwnd_event,
	.pkts_acked	= cubictcp_acked,
	.owner		= THIS_MODULE,
	.name		= "cubic",
};

BTF_SET_START(tcp_cubic_kfunc_ids)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#endif
BTF_SET_END(tcp_cubic_kfunc_ids)

static DEFINE_KFUNC_BTF_ID_SET(&tcp_cubic_kfunc_ids, tcp_cubic_kfunc_btf_set);

static int __init cubictcp_register(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(struct bictcp) > ICSK_CA_PRIV_SIZE);

	/* Precompute a bunch of the scaling factors that are used per-packet
	 * based on SRTT of 100ms
	 */

	beta_scale = 8*(BICTCP_BETA_SCALE+beta) / 3
		/ (BICTCP_BETA_SCALE - beta);

	cube_rtt_scale = (bic_scale * 10);	/* 1024*c/rtt */

	/* calculate the "K" for (wmax-cwnd) = c/rtt * K^3
	 * so K = cubic_root( (wmax-cwnd)*rtt/c )
	 * the unit of K is bictcp_HZ=2^10, not HZ
	 *
	 * c = bic_scale >> 10
	 * rtt = 100ms
	 *
	 * the following code has been designed and tested for
	 * cwnd < 1 million packets
	 * RTT < 100 seconds
	 * HZ < 100,000,000 (corresponding to a 10 nano-second tick)
	 */

	/* 1/c * 2^2*bictcp_HZ * srtt */
	cube_factor = 1ull << (10+3*BICTCP_HZ); /* 2^40 */

	/* divide by bic_scale and by constant Srtt (100ms) */
	do_div(cube_factor, bic_scale * 10);
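
	/*
	 * With the default parameters (beta = 717, bic_scale = 41) the
	 * precomputed values are beta_scale = 15, cube_rtt_scale = 410 and
	 * cube_factor = 2^40 / 410 (~2.7e9).
	 */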

	ret = tcp_register_congestion_control(&cubictcp);
	if (ret)
		return ret;
	register_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
	return 0;
}

static void __exit cubictcp_unregister(void)
{
	unregister_kfunc_btf_id_set(&bpf_tcp_ca_kfunc_list, &tcp_cubic_kfunc_btf_set);
	tcp_unregister_congestion_control(&cubictcp);
}

module_init(cubictcp_register);
module_exit(cubictcp_unregister);

MODULE_AUTHOR("Sangtae Ha, Stephen Hemminger");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CUBIC TCP");
MODULE_VERSION("2.3");