// SPDX-License-Identifier: GPL-2.0
#include <linux/tcp.h>
#include <net/tcp.h>

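/* Mark an skb lost. If the skb was a retransmission that is now lost
 * again, clear its SACKED_RETRANS bit, remove it from retrans_out and
 * bump the LINUX_MIB_TCPLOSTRETRANSMIT counter.
 */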
static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	tcp_skb_mark_lost_uncond_verify(tp, skb);
	if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
		/* Account for retransmits that are lost again */
		TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
		tp->retrans_out -= tcp_skb_pcount(skb);
		NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT,
			      tcp_skb_pcount(skb));
	}
}

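/* Returns true if the packet (re)sent at time t1 with end sequence seq1
 * was sent after the one sent at time t2 with end sequence seq2; ties in
 * send time are broken by sequence number.
 */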
static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}

/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
 *
 * Marks a packet lost if some packet sent later has been (s)acked.
 * The underlying idea is similar to the traditional dupthresh and FACK
 * but they look at different metrics:
 *
 * dupthresh: 3 OOO packets delivered (packet count)
 * FACK: sequence delta to highest sacked sequence (sequence space)
 * RACK: sent time delta to the latest delivered packet (time domain)
 *
 * The advantage of RACK is that it applies to both original and
 * retransmitted packets and therefore is robust against tail losses.
 * Another advantage is being more resilient to reordering by simply
 * allowing some "settling delay", instead of tweaking the dupthresh.
 *
 * When tcp_rack_detect_loss() detects some packets are lost and we
 * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
 * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
 * make us enter the CA_Recovery state.
 */
static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 min_rtt = tcp_min_rtt(tp);
	struct sk_buff *skb, *n;
	u32 reo_wnd;

	*reo_timeout = 0;
	/* To be more reordering resilient, allow min_rtt/4 settling delay
	 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
	 * RTT because reordering is often a path property and less related
	 * to queuing or delayed ACKs.
	 */
	reo_wnd = 1000;
	if ((tp->rack.reord || inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery) &&
	    min_rtt != ~0U) {
		reo_wnd = max((min_rtt >> 2) * tp->rack.reo_wnd_steps, reo_wnd);
		reo_wnd = min(reo_wnd, tp->srtt_us >> 3);
	}
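	/* Illustrative example (hypothetical numbers): with min_rtt = 40ms,
	 * reo_wnd_steps = 1 and a smoothed RTT of 100ms, reo_wnd becomes
	 * max(40000/4 * 1, 1000) = 10000us, which is below the srtt cap.
	 */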

	list_for_each_entry_safe(skb, n, &tp->tsorted_sent_queue,
				 tcp_tsorted_anchor) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		s32 remaining;

		/* Skip ones marked lost but not yet retransmitted */
		if ((scb->sacked & TCPCB_LOST) &&
		    !(scb->sacked & TCPCB_SACKED_RETRANS))
			continue;

		if (!tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
					 tp->rack.end_seq, scb->end_seq))
			break;

		/* A packet is lost if it has not been s/acked beyond
		 * the recent RTT plus the reordering window.
		 */
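		/* Illustrative (hypothetical numbers): with rack.rtt_us = 40000
		 * and reo_wnd = 10000, a packet sent 55000us before tcp_mstamp
		 * has remaining = -5000 and is marked lost below.
		 */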
		remaining = tp->rack.rtt_us + reo_wnd -
			    tcp_stamp_us_delta(tp->tcp_mstamp, skb->skb_mstamp);
		if (remaining <= 0) {
			tcp_rack_mark_skb_lost(sk, skb);
			list_del_init(&skb->tcp_tsorted_anchor);
		} else {
			/* Record maximum wait time */
			*reo_timeout = max_t(u32, *reo_timeout, remaining);
		}
	}
}

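/* Run RACK loss detection if the RACK state has advanced since the last
 * scan; if some packets are still within the reordering window, arm the
 * RACK reordering timer (ICSK_TIME_REO_TIMEOUT) for the remaining wait.
 */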
void tcp_rack_mark_lost(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout;

	if (!tp->rack.advanced)
		return;

	/* Reset the advanced flag to avoid unnecessary queue scanning */
	tp->rack.advanced = 0;
	tcp_rack_detect_loss(sk, &timeout);
	if (timeout) {
		timeout = usecs_to_jiffies(timeout) + TCP_TIMEOUT_MIN;
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
					  timeout, inet_csk(sk)->icsk_rto);
	}
}

/* Record the most recently (re)sent time among the (s)acked packets.
 * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
 * draft-cheng-tcpm-rack-00.txt
 */
void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
		      u64 xmit_time)
{
	u32 rtt_us;

	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
	if (rtt_us < tcp_min_rtt(tp) && (sacked & TCPCB_RETRANS)) {
		/* If the sacked packet was retransmitted, it's ambiguous
		 * whether the retransmission or the original (or the prior
		 * retransmission) was sacked.
		 *
		 * If the original is lost, there is no ambiguity. Otherwise
		 * we assume the original can be delayed up to aRTT + min_rtt.
		 * The aRTT term is bounded by the fast recovery or timeout,
		 * so it's at least one RTT (i.e., retransmission is at least
		 * an RTT later). An RTT sample below min_rtt therefore
		 * suggests the (s)ack matched the original transmission, so
		 * the retransmission's send time cannot be trusted here.
		 */
		return;
	}
	tp->rack.advanced = 1;
	tp->rack.rtt_us = rtt_us;
	if (tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
				end_seq, tp->rack.end_seq)) {
		tp->rack.mstamp = xmit_time;
		tp->rack.end_seq = end_seq;
	}
}

/* We have waited long enough to accommodate reordering. Mark the expired
 * packets lost and retransmit them.
 */
void tcp_rack_reo_timeout(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 timeout, prior_inflight;

	prior_inflight = tcp_packets_in_flight(tp);
	tcp_rack_detect_loss(sk, &timeout);
	if (prior_inflight != tcp_packets_in_flight(tp)) {
		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
			tcp_enter_recovery(sk, false);
			if (!inet_csk(sk)->icsk_ca_ops->cong_control)
				tcp_cwnd_reduction(sk, 1, 0);
		}
		tcp_xmit_retransmit_queue(sk);
	}
	if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
		tcp_rearm_rto(sk);
}

/* Updates the RACK's reo_wnd based on DSACK and no. of recoveries.
 *
 * If DSACK is received, increment reo_wnd by min_rtt/4 (upper bounded
 * by srtt), since there is a possibility that a spurious retransmission
 * was due to a reordering delay longer than reo_wnd.
 *
 * Persist the current reo_wnd value for TCP_RACK_RECOVERY_THRESH (16)
 * no. of successful recoveries (accounts for full DSACK-based loss
 * recovery undo). After that, reset it to default (min_rtt/4).
 *
 * reo_wnd is incremented at most once per RTT, so that the DSACK we are
 * reacting to was (approximately) caused by a spurious retransmission
 * sent after the last reo_wnd update.
 *
 * reo_wnd is tracked in terms of steps (of min_rtt/4), rather than an
 * absolute value, to account for changes in the rtt.
 */
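/* Illustrative example (hypothetical numbers): with min_rtt = 40ms,
 * reo_wnd starts at one step (10ms). A DSACK bumps reo_wnd_steps to 2
 * (20ms); the value persists for the next 16 successful recoveries
 * before reverting to one step.
 */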
void tcp_rack_update_reo_wnd(struct sock *sk, struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_STATIC_REO_WND ||
	    !rs->prior_delivered)
		return;

	/* Disregard DSACK if a rtt has not passed since we adjusted reo_wnd */
	if (before(rs->prior_delivered, tp->rack.last_delivered))
		tp->rack.dsack_seen = 0;

	/* Adjust the reo_wnd if update is pending */
	if (tp->rack.dsack_seen) {
		tp->rack.reo_wnd_steps = min_t(u32, 0xFF,
					       tp->rack.reo_wnd_steps + 1);
		tp->rack.dsack_seen = 0;
		tp->rack.last_delivered = tp->delivered;
		tp->rack.reo_wnd_persist = TCP_RACK_RECOVERY_THRESH;
	} else if (!tp->rack.reo_wnd_persist) {
		tp->rack.reo_wnd_steps = 1;
	}
}