/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp_output.c,v 1.146 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *				:	AF independence
 *
 *		Linus Torvalds	:	send_delayed_ack
 *		David S. Miller	:	Charge memory using the right skb
 *					during syn/ack processing.
 *		David S. Miller :	Output engine completely rewritten.
 *		Andrea Arcangeli:	SYNACK carry ts_recent in tsecr.
 *		Cacophonix Gaul :	draft-minshall-nagle-01
 *		J Hadi Salim	:	ECN support
 *
 */

#include <net/tcp.h>

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/smp_lock.h>

/* People can turn this off for buggy TCP's found in printers etc. */
int sysctl_tcp_retrans_collapse = 1;

/* This limits the percentage of the congestion window which we
 * will allow a single TSO frame to consume.  Building TSO frames
 * which are too large can cause TCP streams to be bursty.
 */
int sysctl_tcp_tso_win_divisor = 3;

static inline void update_send_head(struct sock *sk, struct tcp_sock *tp,
				    struct sk_buff *skb)
{
	sk->sk_send_head = skb->next;
	if (sk->sk_send_head == (struct sk_buff *)&sk->sk_write_queue)
		sk->sk_send_head = NULL;
	tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
	tcp_packets_out_inc(sk, tp, skb);
}

/* SND.NXT, if window was not shrunk.
 * If window has been shrunk, what should we use? It is not clear at all.
 * Using SND.UNA we will fail to open window, SND.NXT is out of window. :-(
 * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
 * invalid. OK, let's make this for now:
 */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp)
{
	if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
		return tp->snd_nxt;
	else
		return tp->snd_una+tp->snd_wnd;
}

/* Calculate mss to advertise in SYN segment.
 * RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
 *
 * 1. It is independent of path mtu.
 * 2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
 * 3. For IPv4 it is reasonable to calculate it from the maximal MTU of
 *    attached devices, because some buggy hosts are confused by
 *    large MSS.
 * 4. We do not follow point 3: we advertise the MSS calculated from the
 *    first hop device mtu, but allow it to be raised to ip_rt_min_advmss.
 *    This may be overridden via information stored in the routing table.
 * 5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
 *    probably even Jumbo".
 */
static __u16 tcp_advertise_mss(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss = tp->advmss;

	if (dst && dst_metric(dst, RTAX_ADVMSS) < mss) {
		mss = dst_metric(dst, RTAX_ADVMSS);
		tp->advmss = mss;
	}

	return (__u16)mss;
}

/* RFC2861. Reset CWND after an idle period longer than RTO to the
 * "restart window". This is the first part of the cwnd validation
 * mechanism.
 */
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
	struct tcp_sock *tp = tcp_sk(sk);
	s32 delta = tcp_time_stamp - tp->lsndtime;
	u32 restart_cwnd = tcp_init_cwnd(tp, dst);
	u32 cwnd = tp->snd_cwnd;

	tcp_ca_event(sk, CA_EVENT_CWND_RESTART);

	tp->snd_ssthresh = tcp_current_ssthresh(sk);
	restart_cwnd = min(restart_cwnd, cwnd);

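	/* Halve cwnd once for every full RTO that elapsed while the
	 * connection was idle, but never drop below the restart window.
	 */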
	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
		cwnd >>= 1;
	tp->snd_cwnd = max(cwnd, restart_cwnd);
	tp->snd_cwnd_stamp = tcp_time_stamp;
	tp->snd_cwnd_used = 0;
}

static inline void tcp_event_data_sent(struct tcp_sock *tp,
				       struct sk_buff *skb, struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	const u32 now = tcp_time_stamp;

	if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
		tcp_cwnd_restart(sk, __sk_dst_get(sk));

	tp->lsndtime = now;

	/* If this is a reply sent within ato of the last received
	 * packet, enter pingpong mode.
	 */
	if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
		icsk->icsk_ack.pingpong = 1;
}

static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
	tcp_dec_quickack_mode(sk, pkts);
	inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}

/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
void tcp_select_initial_window(int __space, __u32 mss,
			       __u32 *rcv_wnd, __u32 *window_clamp,
			       int wscale_ok, __u8 *rcv_wscale)
{
	unsigned int space = (__space < 0 ? 0 : __space);

	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535 << 14);
	space = min(*window_clamp, space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space / mss) * mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* Set window scaling on max possible window
		 * See RFC1323 for an explanation of the limit to 14
		 */
		space = max_t(u32, sysctl_tcp_rmem[2], sysctl_rmem_max);
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
	}
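	/* For example, if the larger of tcp_rmem[2] and rmem_max is 4 MB,
	 * the loop above settles on rcv_wscale = 7, since
	 * 4194304 >> 7 = 32768 fits in the 16-bit window field.
	 */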

	/* Set initial window to a value enough for senders following
	 * RFC2414. Senders not following this RFC will be satisfied
	 * with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}

	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
}

/* Choose a new window to advertise, update state in tcp_sock for the
 * socket, and return result with RFC1323 scaling applied.  The return
 * value can be stuffed directly into th->window for an outgoing
 * frame.
 */
static __inline__ u16 tcp_select_window(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 cur_win = tcp_receive_window(tp);
	u32 new_win = __tcp_select_window(sk);

	/* Never shrink the offered window */
	if (new_win < cur_win) {
		/* Danger Will Robinson!
		 * Don't update rcv_wup/rcv_wnd here or else
		 * we will not be able to advertise a zero
		 * window in time.  --DaveM
		 *
		 * Relax Will Robinson.
		 */
		new_win = cur_win;
	}
	tp->rcv_wnd = new_win;
	tp->rcv_wup = tp->rcv_nxt;

	/* Make sure we do not exceed the maximum possible
	 * scaled window.
	 */
	if (!tp->rx_opt.rcv_wscale)
		new_win = min(new_win, MAX_TCP_WINDOW);
	else
		new_win = min(new_win, (65535U << tp->rx_opt.rcv_wscale));

	/* RFC1323 scaling applied */
	new_win >>= tp->rx_opt.rcv_wscale;

	/* If we advertise zero window, disable fast path. */
	if (new_win == 0)
		tp->pred_flags = 0;

	return new_win;
}


/* This routine actually transmits TCP packets queued up by
 * tcp_do_sendmsg().  This is used by both the initial
 * transmission and possible later retransmissions.
 * All SKB's seen here are completely headerless.  It is our
 * job to build the TCP header, and pass the packet down to
 * IP so it can do the same plus pass the packet off to the
 * device.
 *
 * We are working here with either a clone of the original
 * SKB, or a fresh unique copy made by the retransmit engine.
 */
static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
{
	if (skb != NULL) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		struct inet_sock *inet = inet_sk(sk);
		struct tcp_sock *tp = tcp_sk(sk);
		struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
		int tcp_header_size = tp->tcp_header_len;
		struct tcphdr *th;
		int sysctl_flags;
		int err;

		BUG_ON(!tcp_skb_pcount(skb));

#define SYSCTL_FLAG_TSTAMPS	0x1
#define SYSCTL_FLAG_WSCALE	0x2
#define SYSCTL_FLAG_SACK	0x4

		/* If congestion control is doing timestamping */
		if (icsk->icsk_ca_ops->rtt_sample)
			__net_timestamp(skb);

		sysctl_flags = 0;
		if (tcb->flags & TCPCB_FLAG_SYN) {
			tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
			if (sysctl_tcp_timestamps) {
				tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
				sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
			}
			if (sysctl_tcp_window_scaling) {
				tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
				sysctl_flags |= SYSCTL_FLAG_WSCALE;
			}
			if (sysctl_tcp_sack) {
				sysctl_flags |= SYSCTL_FLAG_SACK;
				if (!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
					tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
			}
		} else if (tp->rx_opt.eff_sacks) {
			/* A SACK is 2 pad bytes, a 2 byte header, plus
			 * 2 32-bit sequence numbers for each SACK block.
			 */
			tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
					    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));
		}

		if (tcp_packets_in_flight(tp) == 0)
			tcp_ca_event(sk, CA_EVENT_TX_START);

		th = (struct tcphdr *) skb_push(skb, tcp_header_size);
		skb->h.th = th;
		skb_set_owner_w(skb, sk);

		/* Build TCP header and checksum it. */
		th->source = inet->sport;
		th->dest = inet->dport;
		th->seq = htonl(tcb->seq);
		th->ack_seq = htonl(tp->rcv_nxt);
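		/* Fill the data offset (header length in 32-bit words,
		 * upper four bits) and the flag bits with a single
		 * 16-bit store instead of writing each bitfield.
		 */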
		*(((__u16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) | tcb->flags);
		if (tcb->flags & TCPCB_FLAG_SYN) {
			/* RFC1323: The window in SYN & SYN/ACK segments
			 * is never scaled.
			 */
			th->window = htons(tp->rcv_wnd);
		} else {
			th->window = htons(tcp_select_window(sk));
		}
		th->check = 0;
		th->urg_ptr = 0;

		if (tp->urg_mode &&
		    between(tp->snd_up, tcb->seq+1, tcb->seq+0xFFFF)) {
			th->urg_ptr = htons(tp->snd_up-tcb->seq);
			th->urg = 1;
		}

		if (tcb->flags & TCPCB_FLAG_SYN) {
			tcp_syn_build_options((__u32 *)(th + 1),
					      tcp_advertise_mss(sk),
					      (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
					      (sysctl_flags & SYSCTL_FLAG_SACK),
					      (sysctl_flags & SYSCTL_FLAG_WSCALE),
					      tp->rx_opt.rcv_wscale,
					      tcb->when,
					      tp->rx_opt.ts_recent);
		} else {
			tcp_build_and_update_options((__u32 *)(th + 1),
						     tp, tcb->when);

			TCP_ECN_send(sk, tp, skb, tcp_header_size);
		}
		tp->af_specific->send_check(sk, th, skb->len, skb);

		if (tcb->flags & TCPCB_FLAG_ACK)
			tcp_event_ack_sent(sk, tcp_skb_pcount(skb));

		if (skb->len != tcp_header_size)
			tcp_event_data_sent(tp, skb, sk);

		TCP_INC_STATS(TCP_MIB_OUTSEGS);

		err = tp->af_specific->queue_xmit(skb, 0);
		if (err <= 0)
			return err;

		tcp_enter_cwr(sk);

		/* NET_XMIT_CN is special. It does not guarantee that
		 * this packet is lost. It tells us that the device is
		 * about to start dropping packets, or already drops
		 * some packets of the same priority, and asks us to
		 * send less aggressively.
		 */
		return err == NET_XMIT_CN ? 0 : err;
	}
	return -ENOBUFS;
#undef SYSCTL_FLAG_TSTAMPS
#undef SYSCTL_FLAG_WSCALE
#undef SYSCTL_FLAG_SACK
}


/* This routine just queues the buffer for sending.
 *
 * NOTE: the probe0 timer is not checked; do not forget tcp_push_pending_frames,
 * otherwise the socket can stall.
 */
static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* Advance write_seq and place onto the write_queue. */
	tp->write_seq = TCP_SKB_CB(skb)->end_seq;
	skb_header_release(skb);
	__skb_queue_tail(&sk->sk_write_queue, skb);
	sk_charge_skb(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = skb;
}

static void tcp_set_skb_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	if (skb->len <= mss_now ||
	    !(sk->sk_route_caps & NETIF_F_TSO)) {
		/* Avoid the costly divide in the normal
		 * non-TSO case.
		 */
		skb_shinfo(skb)->tso_segs = 1;
		skb_shinfo(skb)->tso_size = 0;
	} else {
		unsigned int factor;

		factor = skb->len + (mss_now - 1);
		factor /= mss_now;
		skb_shinfo(skb)->tso_segs = factor;
		skb_shinfo(skb)->tso_size = mss_now;
	}
}

/* Function to create two new TCP segments.  Shrinks the given segment
 * to the specified size and appends a new segment with the rest of the
 * packet to the list.  This won't be called frequently, I hope.
 * Remember, these are still headerless SKBs at this point.
 */
int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *buff;
	int nsize, old_factor;
	u16 flags;

	BUG_ON(len >= skb->len);

	nsize = skb_headlen(skb) - len;
	if (nsize < 0)
		nsize = 0;

	if (skb_cloned(skb) &&
	    skb_is_nonlinear(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	/* Get a new skb... force flag on. */
	buff = sk_stream_alloc_skb(sk, nsize, GFP_ATOMIC);
	if (buff == NULL)
		return -ENOMEM; /* We'll just try again later. */
	sk_charge_skb(sk, buff);

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;
	TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked;
	TCP_SKB_CB(skb)->sacked &= ~TCPCB_AT_TAIL;

	if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_HW) {
		/* Copy and checksum data tail into the new buffer. */
		buff->csum = csum_partial_copy_nocheck(skb->data + len, skb_put(buff, nsize),
						       nsize, 0);

		skb_trim(skb, len);

		skb->csum = csum_block_sub(skb->csum, buff->csum, len);
	} else {
		skb->ip_summed = CHECKSUM_HW;
		skb_split(skb, buff, len);
	}

	buff->ip_summed = skb->ip_summed;

	/* Looks stupid, but our code really uses the 'when' of skbs
	 * which it has never sent before. --ANK
	 */
	TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
	buff->tstamp = skb->tstamp;

	old_factor = tcp_skb_pcount(skb);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* If this packet has been sent out already, we must
	 * adjust the various packet counters.
	 */
	if (!before(tp->snd_nxt, TCP_SKB_CB(buff)->end_seq)) {
		int diff = old_factor - tcp_skb_pcount(skb) -
			tcp_skb_pcount(buff);

		tp->packets_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)
			tp->sacked_out -= diff;
		if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
			tp->retrans_out -= diff;

		if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) {
			tp->lost_out -= diff;
			tp->left_out -= diff;
		}
		if (diff > 0) {
			tp->fackets_out -= diff;
			if ((int)tp->fackets_out < 0)
				tp->fackets_out = 0;
		}
	}

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* This is similar to __pskb_pull_head() (it will go to core/skbuff.c
 * eventually). The difference is that the pulled data is not copied,
 * but immediately discarded.
 */
static unsigned char *__pskb_trim_head(struct sk_buff *skb, int len)
{
	int i, k, eat;

	eat = len;
	k = 0;
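	/* Walk the paged fragments: release pages that are consumed
	 * entirely and shift the offset into the first partially
	 * consumed fragment.
	 */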
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		if (skb_shinfo(skb)->frags[i].size <= eat) {
			put_page(skb_shinfo(skb)->frags[i].page);
			eat -= skb_shinfo(skb)->frags[i].size;
		} else {
			skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i];
			if (eat) {
				skb_shinfo(skb)->frags[k].page_offset += eat;
				skb_shinfo(skb)->frags[k].size -= eat;
				eat = 0;
			}
			k++;
		}
	}
	skb_shinfo(skb)->nr_frags = k;

	skb->tail = skb->data;
	skb->data_len -= len;
	skb->len = skb->data_len;
	return skb->tail;
}

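/* Trim len bytes from the front of an already-queued skb, e.g. when a
 * retransmission must skip data the peer has already acknowledged.
 * Sequence number and socket accounting are adjusted, and the TSO
 * factor is recomputed if needed.
 */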
int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
{
	if (skb_cloned(skb) &&
	    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
		return -ENOMEM;

	if (len <= skb_headlen(skb)) {
		__skb_pull(skb, len);
	} else {
		if (__pskb_trim_head(skb, len - skb_headlen(skb)) == NULL)
			return -ENOMEM;
	}

	TCP_SKB_CB(skb)->seq += len;
	skb->ip_summed = CHECKSUM_HW;

	skb->truesize -= len;
	sk->sk_wmem_queued -= len;
	sk->sk_forward_alloc += len;
	sock_set_flag(sk, SOCK_QUEUE_SHRUNK);

	/* Any change of skb->len requires recalculation of tso
	 * factor and mss.
	 */
	if (tcp_skb_pcount(skb) > 1)
		tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk, 1));

	return 0;
}

/* This function synchronizes snd mss to the current pmtu/exthdr set.

   tp->rx_opt.user_mss is the mss set by the user via TCP_MAXSEG. It does
   NOT account for TCP options; it covers only the bare TCP header.

   tp->rx_opt.mss_clamp is the mss negotiated at connection setup.
   It is the minimum of user_mss and the mss received with SYN.
   It also does not include TCP options.

   tp->pmtu_cookie is the last pmtu seen by this function.

   tp->mss_cache is the current effective sending mss, including
   all tcp options except for SACKs. It is evaluated,
   taking into account the current pmtu, but never exceeds
   tp->rx_opt.mss_clamp.

   NOTE1. rfc1122 clearly states that the advertised MSS
   DOES NOT include either tcp or ip options.

   NOTE2. tp->pmtu_cookie and tp->mss_cache are READ ONLY outside
   this function.			--ANK (980731)
 */

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now;

	/* Calculate base mss without TCP options:
	   It is MMS_S - sizeof(tcphdr) of rfc1122
	 */
	mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);

	/* Clamp it (mss_clamp does not include tcp options) */
	if (mss_now > tp->rx_opt.mss_clamp)
		mss_now = tp->rx_opt.mss_clamp;

	/* Now subtract optional transport overhead */
	mss_now -= tp->ext_header_len;

	/* Then reserve room for full set of TCP options and 8 bytes of data */
	if (mss_now < 48)
		mss_now = 48;

	/* Now subtract TCP options size, not including SACKs */
	mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
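	/* For example, a typical IPv4 path MTU of 1500 with no IP options
	 * and timestamps enabled yields 1500 - 20 - 20 - 12 = 1448 bytes.
	 */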

	/* Bound mss with half of window */
	if (tp->max_window && mss_now > (tp->max_window>>1))
		mss_now = max((tp->max_window>>1), 68U - tp->tcp_header_len);

	/* And store cached results */
	tp->pmtu_cookie = pmtu;
	tp->mss_cache = mss_now;

	return mss_now;
}

/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 *
 * LARGESEND note: !urg_mode is overkill, only frames up to snd_up
 * cannot be large. However, taking into account rare use of URG, this
 * is not a big flaw.
 */
unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct dst_entry *dst = __sk_dst_get(sk);
	u32 mss_now;
	u16 xmit_size_goal;
	int doing_tso = 0;

	mss_now = tp->mss_cache;

	if (large_allowed &&
	    (sk->sk_route_caps & NETIF_F_TSO) &&
	    !tp->urg_mode)
		doing_tso = 1;

	if (dst) {
		u32 mtu = dst_mtu(dst);
		if (mtu != tp->pmtu_cookie)
			mss_now = tcp_sync_mss(sk, mtu);
	}

	if (tp->rx_opt.eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->rx_opt.eff_sacks * TCPOLEN_SACK_PERBLOCK));

	xmit_size_goal = mss_now;

	if (doing_tso) {
		xmit_size_goal = 65535 -
			tp->af_specific->net_header_len -
			tp->ext_header_len - tp->tcp_header_len;

		if (tp->max_window &&
		    (xmit_size_goal > (tp->max_window >> 1)))
			xmit_size_goal = max((tp->max_window >> 1),
					     68U - tp->tcp_header_len);

		xmit_size_goal -= (xmit_size_goal % mss_now);
	}
	tp->xmit_size_goal = xmit_size_goal;

	return mss_now;
}

/* Congestion window validation. (RFC2861) */

static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
{
	__u32 packets_out = tp->packets_out;

	if (packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
			tcp_cwnd_application_limited(sk);
	}
}

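/* Return how many bytes may go out right now: the smaller of what is
 * left of the peer's receive window from this skb's sequence number
 * and the congestion window quota, both expressed in bytes.
 */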
static unsigned int tcp_window_allows(struct tcp_sock *tp, struct sk_buff *skb, unsigned int mss_now, unsigned int cwnd)
{
	u32 window, cwnd_len;

	window = (tp->snd_una + tp->snd_wnd - TCP_SKB_CB(skb)->seq);
	cwnd_len = mss_now * cwnd;
	return min(window, cwnd_len);
}

/* Can at least one segment of SKB be sent right now, according to the
 * congestion window rules?  If so, return how many segments are allowed.
 */
static inline unsigned int tcp_cwnd_test(struct tcp_sock *tp, struct sk_buff *skb)
{
	u32 in_flight, cwnd;

	/* Don't be strict about the congestion window for the final FIN.  */
	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 1;

	in_flight = tcp_packets_in_flight(tp);
	cwnd = tp->snd_cwnd;
	if (in_flight < cwnd)
		return (cwnd - in_flight);

	return 0;
}

/* This must be invoked the first time we consider transmitting
 * SKB onto the wire.
 */
static inline int tcp_init_tso_segs(struct sock *sk, struct sk_buff *skb, unsigned int mss_now)
{
	int tso_segs = tcp_skb_pcount(skb);

	if (!tso_segs ||
	    (tso_segs > 1 &&
	     skb_shinfo(skb)->tso_size != mss_now)) {
		tcp_set_skb_tso_segs(sk, skb, mss_now);
		tso_segs = tcp_skb_pcount(skb);
	}
	return tso_segs;
}

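/* Minshall's variant of the Nagle test: return true while the most
 * recently sent small segment (ending at snd_sml) is still
 * unacknowledged.
 */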
static inline int tcp_minshall_check(const struct tcp_sock *tp)
{
	return after(tp->snd_sml, tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

/* Return 0 if the packet can be sent now without violating Nagle's rules:
 * 1. It is full sized.
 * 2. Or it contains FIN. (already checked by caller)
 * 3. Or TCP_NODELAY was set.
 * 4. Or TCP_CORK is not set, and all sent packets are ACKed.
 *    With Minshall's modification: all sent small packets are ACKed.
 */

static inline int tcp_nagle_check(const struct tcp_sock *tp,
				  const struct sk_buff *skb,
				  unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		((nonagle&TCP_NAGLE_CORK) ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}

/* Return non-zero if the Nagle test allows this packet to be
 * sent now.
 */
static inline int tcp_nagle_test(struct tcp_sock *tp, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	/* The Nagle rule does not apply to frames which sit in the middle of
	 * the write_queue (they have no chance to get new data).
	 *
	 * This is implemented in the callers, where they modify the 'nonagle'
	 * argument based upon the location of SKB in the send queue.
	 */
	if (nonagle & TCP_NAGLE_PUSH)
		return 1;

	/* Don't use the nagle rule for urgent data (or for the final FIN). */
	if (tp->urg_mode ||
	    (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN))
		return 1;

	if (!tcp_nagle_check(tp, skb, cur_mss, nonagle))
		return 1;

	return 0;
}

/* Does at least the first segment of SKB fit into the send window? */
static inline int tcp_snd_wnd_test(struct tcp_sock *tp, struct sk_buff *skb, unsigned int cur_mss)
{
	u32 end_seq = TCP_SKB_CB(skb)->end_seq;

	if (skb->len > cur_mss)
		end_seq = TCP_SKB_CB(skb)->seq + cur_mss;

	return !after(end_seq, tp->snd_una + tp->snd_wnd);
}

/* This checks if the data bearing packet SKB (usually sk->sk_send_head)
 * should be put on the wire right now.  If so, it returns the number of
 * packets allowed by the congestion window.
 */
static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
				 unsigned int cur_mss, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	unsigned int cwnd_quota;

	tcp_init_tso_segs(sk, skb, cur_mss);

	if (!tcp_nagle_test(tp, skb, cur_mss, nonagle))
		return 0;

	cwnd_quota = tcp_cwnd_test(tp, skb);
	if (cwnd_quota &&
	    !tcp_snd_wnd_test(tp, skb, cur_mss))
		cwnd_quota = 0;

	return cwnd_quota;
}

static inline int tcp_skb_is_last(const struct sock *sk,
				  const struct sk_buff *skb)
{
	return skb->next == (struct sk_buff *)&sk->sk_write_queue;
}

int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp)
{
	struct sk_buff *skb = sk->sk_send_head;

	return (skb &&
		tcp_snd_test(sk, skb, tcp_current_mss(sk, 1),
			     (tcp_skb_is_last(sk, skb) ?
			      TCP_NAGLE_PUSH :
			      tp->nonagle)));
}

/* Trim TSO SKB to LEN bytes, put the remaining data into a new packet
 * which is put after SKB on the list.  It is very much like
 * tcp_fragment() except that it may make several kinds of assumptions
 * in order to speed up the splitting operation.  In particular, we
 * know that all the data is in scatter-gather pages, and that the
 * packet has never been sent out before (and thus is not cloned).
 */
static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, unsigned int mss_now)
{
	struct sk_buff *buff;
	int nlen = skb->len - len;
	u16 flags;

	/* All of a TSO frame must be composed of paged data.  */
	if (skb->len != skb->data_len)
		return tcp_fragment(sk, skb, len, mss_now);

	buff = sk_stream_alloc_pskb(sk, 0, 0, GFP_ATOMIC);
	if (unlikely(buff == NULL))
		return -ENOMEM;

	buff->truesize = nlen;
	skb->truesize -= nlen;

	/* Correct the sequence numbers. */
	TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
	TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;

	/* PSH and FIN should only be set in the second packet. */
	flags = TCP_SKB_CB(skb)->flags;
	TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN|TCPCB_FLAG_PSH);
	TCP_SKB_CB(buff)->flags = flags;

	/* This packet was never sent out yet, so no SACK bits. */
	TCP_SKB_CB(buff)->sacked = 0;

	buff->ip_summed = skb->ip_summed = CHECKSUM_HW;
	skb_split(skb, buff, len);

	/* Fix up tso_factor for both original and new SKB. */
	tcp_set_skb_tso_segs(sk, skb, mss_now);
	tcp_set_skb_tso_segs(sk, buff, mss_now);

	/* Link BUFF into the send queue. */
	skb_header_release(buff);
	__skb_append(skb, buff, &sk->sk_write_queue);

	return 0;
}

/* Try to defer sending, if possible, in order to minimize the amount
 * of TSO splitting we do.  View it as a kind of TSO Nagle test.
 *
 * This algorithm is from John Heffner.
 */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 send_win, cong_win, limit, in_flight;

	if (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)
		return 0;

	if (icsk->icsk_ca_state != TCP_CA_Open)
		return 0;

	in_flight = tcp_packets_in_flight(tp);

	BUG_ON(tcp_skb_pcount(skb) <= 1 ||
	       (tp->snd_cwnd <= in_flight));

	send_win = (tp->snd_una + tp->snd_wnd) - TCP_SKB_CB(skb)->seq;

	/* From in_flight test above, we know that cwnd > in_flight.  */
	cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;

	limit = min(send_win, cong_win);

	if (sysctl_tcp_tso_win_divisor) {
		u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);

		/* If at least some fraction of a window is available,
		 * just use it.
		 */
		chunk /= sysctl_tcp_tso_win_divisor;
		if (limit >= chunk)
			return 0;
	} else {
		/* Different approach, try not to defer past a single
		 * ACK.  Receiver should ACK every other full sized
		 * frame, so if we have space for more than 3 frames
		 * then send now.
		 */
		if (limit > tcp_max_burst(tp) * tp->mss_cache)
			return 0;
	}

	/* Ok, it looks like it is advisable to defer.  */
	return 1;
}

/* This routine writes packets to the network.  It advances the
 * send_head.  This happens as incoming acks open up the remote
 * window for us.
 *
 * Returns 1 if no segments are in flight and we have queued segments, but
 * cannot send anything now because of SWS or another problem.
 */
static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	unsigned int tso_segs, sent_pkts;
	int cwnd_quota;

	/* If we are closed, the bytes will have to remain here.
	 * In time closedown will finish, we empty the write queue and all
	 * will be happy.
	 */
	if (unlikely(sk->sk_state == TCP_CLOSE))
		return 0;

	sent_pkts = 0;
	while ((skb = sk->sk_send_head)) {
		unsigned int limit;

		tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
		BUG_ON(!tso_segs);

		cwnd_quota = tcp_cwnd_test(tp, skb);
		if (!cwnd_quota)
			break;

		if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
			break;

		if (tso_segs == 1) {
			if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
						     (tcp_skb_is_last(sk, skb) ?
						      nonagle : TCP_NAGLE_PUSH))))
				break;
		} else {
			if (tcp_tso_should_defer(sk, tp, skb))
				break;
		}

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

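			/* When the skb does not use up the whole allowance,
			 * send only a whole number of full-sized segments
			 * and leave the sub-MSS remainder queued.
			 */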
			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			break;

		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (unlikely(tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC))))
			break;

		/* Advance the send_head.  This one is sent out.
		 * This call will increment packets_out.
		 */
		update_send_head(sk, tp, skb);

		tcp_minshall_update(tp, mss_now, skb);
		sent_pkts++;
	}

	if (likely(sent_pkts)) {
		tcp_cwnd_validate(sk, tp);
		return 0;
	}
	return !tp->packets_out && sk->sk_send_head;
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp,
			       unsigned int cur_mss, int nonagle)
{
	struct sk_buff *skb = sk->sk_send_head;

	if (skb) {
		if (tcp_write_xmit(sk, cur_mss, nonagle))
			tcp_check_probe_timer(sk, tp);
	}
}

/* Send the _single_ skb sitting at the send head.  This function does not
 * arm the probe timer; for that, the full push_pending_frames path is
 * required.
 */
void tcp_push_one(struct sock *sk, unsigned int mss_now)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb = sk->sk_send_head;
	unsigned int tso_segs, cwnd_quota;

	BUG_ON(!skb || skb->len < mss_now);

	tso_segs = tcp_init_tso_segs(sk, skb, mss_now);
	cwnd_quota = tcp_snd_test(sk, skb, mss_now, TCP_NAGLE_PUSH);

	if (likely(cwnd_quota)) {
		unsigned int limit;

		BUG_ON(!tso_segs);

		limit = mss_now;
		if (tso_segs > 1) {
			limit = tcp_window_allows(tp, skb,
						  mss_now, cwnd_quota);

			if (skb->len < limit) {
				unsigned int trim = skb->len % mss_now;

				if (trim)
					limit = skb->len - trim;
			}
		}

		if (skb->len > limit &&
		    unlikely(tso_fragment(sk, skb, limit, mss_now)))
			return;

		/* Send it out now. */
		TCP_SKB_CB(skb)->when = tcp_time_stamp;

		if (likely(!tcp_transmit_skb(sk, skb_clone(skb, sk->sk_allocation)))) {
			update_send_head(sk, tp, skb);
			tcp_cwnd_validate(sk, tp);
			return;
		}
	}
}

Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1110 | /* This function returns the amount that we can raise the |
| 1111 | * usable window based on the following constraints |
| 1112 | * |
| 1113 | * 1. The window can never be shrunk once it is offered (RFC 793) |
| 1114 | * 2. We limit memory per socket |
| 1115 | * |
| 1116 | * RFC 1122: |
| 1117 | * "the suggested [SWS] avoidance algorithm for the receiver is to keep |
| 1118 | * RECV.NEXT + RCV.WIN fixed until: |
| 1119 | * RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)" |
| 1120 | * |
| 1121 | * i.e. don't raise the right edge of the window until you can raise |
| 1122 | * it at least MSS bytes. |
| 1123 | * |
| 1124 | * Unfortunately, the recommended algorithm breaks header prediction, |
| 1125 | * since header prediction assumes th->window stays fixed. |
| 1126 | * |
| 1127 | * Strictly speaking, keeping th->window fixed violates the receiver |
| 1128 | * side SWS prevention criteria. The problem is that under this rule |
| 1129 | * a stream of single byte packets will cause the right side of the |
| 1130 | * window to always advance by a single byte. |
| 1131 | * |
| 1132 | * Of course, if the sender implements sender side SWS prevention |
| 1133 | * then this will not be a problem. |
| 1134 | * |
| 1135 | * BSD seems to make the following compromise: |
| 1136 | * |
| 1137 | * If the free space is less than the 1/4 of the maximum |
| 1138 | * space available and the free space is less than 1/2 mss, |
| 1139 | * then set the window to 0. |
| 1140 | * [ Actually, bsd uses MSS and 1/4 of maximal _window_ ] |
| 1141 | * Otherwise, just prevent the window from shrinking |
| 1142 | * and from being larger than the largest representable value. |
| 1143 | * |
| 1144 | * This prevents incremental opening of the window in the regime |
| 1145 | * where TCP is limited by the speed of the reader side taking |
| 1146 | * data out of the TCP receive queue. It does nothing about |
| 1147 | * those cases where the window is constrained on the sender side |
| 1148 | * because the pipeline is full. |
| 1149 | * |
| 1150 | * BSD also seems to "accidentally" limit itself to windows that are a |
| 1151 | * multiple of MSS, at least until the free space gets quite small. |
| 1152 | * This would appear to be a side effect of the mbuf implementation. |
| 1153 | * Combining these two algorithms results in the observed behavior |
| 1154 | * of having a fixed window size at almost all times. |
| 1155 | * |
| 1156 | * Below we obtain similar behavior by forcing the offered window to |
| 1157 | * a multiple of the mss when it is feasible to do so. |
| 1158 | * |
| 1159 | * Note, we don't "adjust" for TIMESTAMP or SACK option bytes. |
| 1160 | * Regular options like TIMESTAMP are taken into account. |
| 1161 | */ |
| 1162 | u32 __tcp_select_window(struct sock *sk) |
| 1163 | { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1164 | struct inet_connection_sock *icsk = inet_csk(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1165 | struct tcp_sock *tp = tcp_sk(sk); |
| 1166 | /* MSS for the peer's data. Previous verions used mss_clamp |
| 1167 | * here. I don't know if the value based on our guesses |
| 1168 | * of peer's MSS is better for the performance. It's more correct |
| 1169 | * but may be worse for the performance because of rcv_mss |
| 1170 | * fluctuations. --SAW 1998/11/1 |
| 1171 | */ |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1172 | int mss = icsk->icsk_ack.rcv_mss; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1173 | int free_space = tcp_space(sk); |
| 1174 | int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk)); |
| 1175 | int window; |
| 1176 | |
| 1177 | if (mss > full_space) |
| 1178 | mss = full_space; |
| 1179 | |
| 1180 | if (free_space < full_space/2) { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1181 | icsk->icsk_ack.quick = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1182 | |
| 1183 | if (tcp_memory_pressure) |
| 1184 | tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss); |
| 1185 | |
| 1186 | if (free_space < mss) |
| 1187 | return 0; |
| 1188 | } |
| 1189 | |
| 1190 | if (free_space > tp->rcv_ssthresh) |
| 1191 | free_space = tp->rcv_ssthresh; |
| 1192 | |
| 1193 | /* Don't do rounding if we are using window scaling, since the |
| 1194 | * scaled window will not line up with the MSS boundary anyway. |
| 1195 | */ |
| 1196 | window = tp->rcv_wnd; |
| 1197 | if (tp->rx_opt.rcv_wscale) { |
| 1198 | window = free_space; |
| 1199 | |
| 1200 | /* Advertise enough space so that it won't get scaled away. |
| 1201 | * Important case: prevent zero window announcement if |
| 1202 | * 1<<rcv_wscale > mss. |
| 1203 | */ |
| 1204 | if (((window >> tp->rx_opt.rcv_wscale) << tp->rx_opt.rcv_wscale) != window) |
| 1205 | window = (((window >> tp->rx_opt.rcv_wscale) + 1) |
| 1206 | << tp->rx_opt.rcv_wscale); |
| 1207 | } else { |
| 1208 | /* Get the largest window that is a nice multiple of mss. |
| 1209 | * Window clamp already applied above. |
| 1210 | * If our current window offering is within 1 mss of the |
| 1211 | * free space we just keep it. This prevents the divide |
| 1212 | * and multiply from happening most of the time. |
| 1213 | * We also don't do any window rounding when the free space |
| 1214 | * is too small. |
| 1215 | */ |
| 1216 | if (window <= free_space - mss || window > free_space) |
| 1217 | window = (free_space/mss)*mss; |
| 1218 | } |
| 1219 | |
| 1220 | return window; |
| 1221 | } |
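/* Illustration (not part of the kernel source): a minimal stand-alone sketch
 * of the MSS-rounding step in the non-wscale branch of __tcp_select_window()
 * above. Plain userspace C; the demo_* names are hypothetical.
 */
#include <assert.h>

/* Keep the current offer while it stays within one MSS of the free space,
 * otherwise round the free space down to a whole number of MSS segments.
 */
static int demo_round_window(int cur_window, int free_space, int mss)
{
	if (cur_window <= free_space - mss || cur_window > free_space)
		cur_window = (free_space / mss) * mss;
	return cur_window;
}

int main(void)
{
	/* free_space = 10000, mss = 1460: a stale 5840-byte offer is rebuilt
	 * as 6 * 1460 = 8760; 8760 is then kept on the next call because it
	 * is within one MSS of the free space, avoiding the divide/multiply.
	 */
	assert(demo_round_window(5840, 10000, 1460) == 8760);
	assert(demo_round_window(8760, 10000, 1460) == 8760);
	return 0;
}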
| 1222 | |
| 1223 | /* Attempt to collapse two adjacent SKB's during retransmission. */ |
| 1224 | static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now) |
| 1225 | { |
| 1226 | struct tcp_sock *tp = tcp_sk(sk); |
| 1227 | struct sk_buff *next_skb = skb->next; |
| 1228 | |
| 1229 | /* The first test we must make is that neither of these two |
| 1230 | * SKB's is still referenced by someone else. |
| 1231 | */ |
| 1232 | if (!skb_cloned(skb) && !skb_cloned(next_skb)) { |
| 1233 | int skb_size = skb->len, next_skb_size = next_skb->len; |
| 1234 | u16 flags = TCP_SKB_CB(skb)->flags; |
| 1235 | |
| 1236 | /* Also punt if next skb has been SACK'd. */ |
| 1237 | if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED) |
| 1238 | return; |
| 1239 | |
| 1240 | /* Next skb is out of window. */ |
| 1241 | if (after(TCP_SKB_CB(next_skb)->end_seq, tp->snd_una+tp->snd_wnd)) |
| 1242 | return; |
| 1243 | |
| 1244 | /* Punt if not enough space exists in the first SKB for |
| 1245 | * the data in the second, or the total combined payload |
| 1246 | * would exceed the MSS. |
| 1247 | */ |
| 1248 | if ((next_skb_size > skb_tailroom(skb)) || |
| 1249 | ((skb_size + next_skb_size) > mss_now)) |
| 1250 | return; |
| 1251 | |
| 1252 | BUG_ON(tcp_skb_pcount(skb) != 1 || |
| 1253 | tcp_skb_pcount(next_skb) != 1); |
| 1254 | |
| 1255 | /* Ok. We will be able to collapse the packet. */ |
David S. Miller | 8728b83 | 2005-08-09 19:25:21 -0700 | [diff] [blame] | 1256 | __skb_unlink(next_skb, &sk->sk_write_queue); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1257 | |
| 1258 | memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size); |
| 1259 | |
| 1260 | if (next_skb->ip_summed == CHECKSUM_HW) |
| 1261 | skb->ip_summed = CHECKSUM_HW; |
| 1262 | |
| 1263 | if (skb->ip_summed != CHECKSUM_HW) |
| 1264 | skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); |
| 1265 | |
| 1266 | /* Update sequence range on original skb. */ |
| 1267 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; |
| 1268 | |
| 1269 | /* Merge over control information. */ |
| 1270 | flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */ |
| 1271 | TCP_SKB_CB(skb)->flags = flags; |
| 1272 | |
| 1273 | /* All done, get rid of second SKB and account for it so |
| 1274 | * packet counting does not break. |
| 1275 | */ |
| 1276 | TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked&(TCPCB_EVER_RETRANS|TCPCB_AT_TAIL); |
| 1277 | if (TCP_SKB_CB(next_skb)->sacked&TCPCB_SACKED_RETRANS) |
| 1278 | tp->retrans_out -= tcp_skb_pcount(next_skb); |
| 1279 | if (TCP_SKB_CB(next_skb)->sacked&TCPCB_LOST) { |
| 1280 | tp->lost_out -= tcp_skb_pcount(next_skb); |
| 1281 | tp->left_out -= tcp_skb_pcount(next_skb); |
| 1282 | } |
| 1283 | /* Reno case is special. Sigh... */ |
| 1284 | if (!tp->rx_opt.sack_ok && tp->sacked_out) { |
| 1285 | tcp_dec_pcount_approx(&tp->sacked_out, next_skb); |
| 1286 | tp->left_out -= tcp_skb_pcount(next_skb); |
| 1287 | } |
| 1288 | |
| 1289 | /* Not quite right: it can be > snd.fack, but |
| 1290 | * it is better to underestimate fackets. |
| 1291 | */ |
| 1292 | tcp_dec_pcount_approx(&tp->fackets_out, next_skb); |
| 1293 | tcp_packets_out_dec(tp, next_skb); |
| 1294 | sk_stream_free_skb(sk, next_skb); |
| 1295 | } |
| 1296 | } |
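/* Illustration (not part of the kernel source): the two size gates that
 * tcp_retrans_try_collapse() checks above, distilled into a stand-alone
 * predicate. Plain userspace C; the demo_* names are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>

/* Collapsing is only worthwhile when the second segment fits into the first
 * skb's tailroom and the merged payload still fits within one MSS.
 */
static bool demo_can_collapse(int skb_len, int tailroom, int next_len, int mss)
{
	if (next_len > tailroom)
		return false;		/* no room to copy the data over */
	if (skb_len + next_len > mss)
		return false;		/* merged segment would exceed the MSS */
	return true;
}

int main(void)
{
	/* Two 300-byte segments merge under a 1460-byte MSS... */
	assert(demo_can_collapse(300, 1000, 300, 1460));
	/* ...but two 800-byte segments would overflow it. */
	assert(!demo_can_collapse(800, 1000, 800, 1460));
	return 0;
}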
| 1297 | |
| 1298 | /* Do a simple retransmit without using the backoff mechanisms in |
| 1299 | * tcp_timer. This is used for path mtu discovery. |
| 1300 | * The socket is already locked here. |
| 1301 | */ |
| 1302 | void tcp_simple_retransmit(struct sock *sk) |
| 1303 | { |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1304 | const struct inet_connection_sock *icsk = inet_csk(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1305 | struct tcp_sock *tp = tcp_sk(sk); |
| 1306 | struct sk_buff *skb; |
| 1307 | unsigned int mss = tcp_current_mss(sk, 0); |
| 1308 | int lost = 0; |
| 1309 | |
| 1310 | sk_stream_for_retrans_queue(skb, sk) { |
| 1311 | if (skb->len > mss && |
| 1312 | !(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED)) { |
| 1313 | if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { |
| 1314 | TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; |
| 1315 | tp->retrans_out -= tcp_skb_pcount(skb); |
| 1316 | } |
| 1317 | if (!(TCP_SKB_CB(skb)->sacked&TCPCB_LOST)) { |
| 1318 | TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; |
| 1319 | tp->lost_out += tcp_skb_pcount(skb); |
| 1320 | lost = 1; |
| 1321 | } |
| 1322 | } |
| 1323 | } |
| 1324 | |
| 1325 | if (!lost) |
| 1326 | return; |
| 1327 | |
| 1328 | tcp_sync_left_out(tp); |
| 1329 | |
| 1330 | /* Don't muck with the congestion window here. |
| 1331 | * The reason is that we do not increase the amount of _data_ |
| 1332 | * in the network, but the units have changed and the effective |
| 1333 | * cwnd/ssthresh really are reduced now. |
| 1334 | */ |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1335 | if (icsk->icsk_ca_state != TCP_CA_Loss) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1336 | tp->high_seq = tp->snd_nxt; |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1337 | tp->snd_ssthresh = tcp_current_ssthresh(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1338 | tp->prior_ssthresh = 0; |
| 1339 | tp->undo_marker = 0; |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1340 | tcp_set_ca_state(sk, TCP_CA_Loss); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1341 | } |
| 1342 | tcp_xmit_retransmit_queue(sk); |
| 1343 | } |
| 1344 | |
| 1345 | /* This retransmits one SKB. Policy decisions and retransmit queue |
| 1346 | * state updates are done by the caller. Returns non-zero if an |
| 1347 | * error occurred which prevented the send. |
| 1348 | */ |
| 1349 | int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) |
| 1350 | { |
| 1351 | struct tcp_sock *tp = tcp_sk(sk); |
| 1352 | unsigned int cur_mss = tcp_current_mss(sk, 0); |
| 1353 | int err; |
| 1354 | |
| 1355 | /* Do not send more than we queued. 1/4 is reserved for possible |
| 1356 | * copying overhead: fragmentation, tunneling, mangling, etc. |
| 1357 | */ |
| 1358 | if (atomic_read(&sk->sk_wmem_alloc) > |
| 1359 | min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf)) |
| 1360 | return -EAGAIN; |
| 1361 | |
| 1362 | if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { |
| 1363 | if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) |
| 1364 | BUG(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1365 | if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) |
| 1366 | return -ENOMEM; |
| 1367 | } |
| 1368 | |
| 1369 | /* If the receiver has shrunk its window, and skb is out of |
| 1370 | * the new window, do not retransmit it. The exception is the |
| 1371 | * case when the window is shrunk to zero. In this case |
| 1372 | * our retransmit serves as a zero window probe. |
| 1373 | */ |
| 1374 | if (!before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd) |
| 1375 | && TCP_SKB_CB(skb)->seq != tp->snd_una) |
| 1376 | return -EAGAIN; |
| 1377 | |
| 1378 | if (skb->len > cur_mss) { |
David S. Miller | 846998a | 2005-08-04 19:52:01 -0700 | [diff] [blame] | 1379 | if (tcp_fragment(sk, skb, cur_mss, cur_mss)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1380 | return -ENOMEM; /* We'll try again later. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1381 | } |
| 1382 | |
| 1383 | /* Collapse two adjacent packets if worthwhile and we can. */ |
| 1384 | if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) && |
| 1385 | (skb->len < (cur_mss >> 1)) && |
| 1386 | (skb->next != sk->sk_send_head) && |
| 1387 | (skb->next != (struct sk_buff *)&sk->sk_write_queue) && |
| 1388 | (skb_shinfo(skb)->nr_frags == 0 && skb_shinfo(skb->next)->nr_frags == 0) && |
| 1389 | (tcp_skb_pcount(skb) == 1 && tcp_skb_pcount(skb->next) == 1) && |
| 1390 | (sysctl_tcp_retrans_collapse != 0)) |
| 1391 | tcp_retrans_try_collapse(sk, skb, cur_mss); |
| 1392 | |
| 1393 | if(tp->af_specific->rebuild_header(sk)) |
| 1394 | return -EHOSTUNREACH; /* Routing failure or similar. */ |
| 1395 | |
| 1396 | /* Some Solaris stacks overoptimize and ignore the FIN on a |
| 1397 | * retransmit when old data is attached. So strip it off |
| 1398 | * since it is cheap to do so and saves bytes on the network. |
| 1399 | */ |
| 1400 | if(skb->len > 0 && |
| 1401 | (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) && |
| 1402 | tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { |
| 1403 | if (!pskb_trim(skb, 0)) { |
| 1404 | TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1; |
| 1405 | skb_shinfo(skb)->tso_segs = 1; |
| 1406 | skb_shinfo(skb)->tso_size = 0; |
| 1407 | skb->ip_summed = CHECKSUM_NONE; |
| 1408 | skb->csum = 0; |
| 1409 | } |
| 1410 | } |
| 1411 | |
| 1412 | /* Make a copy if the first transmission SKB clone we made |
| 1413 | * is still in somebody's hands; else make a clone. |
| 1414 | */ |
| 1415 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1416 | |
| 1417 | err = tcp_transmit_skb(sk, (skb_cloned(skb) ? |
| 1418 | pskb_copy(skb, GFP_ATOMIC): |
| 1419 | skb_clone(skb, GFP_ATOMIC))); |
| 1420 | |
| 1421 | if (err == 0) { |
| 1422 | /* Update global TCP statistics. */ |
| 1423 | TCP_INC_STATS(TCP_MIB_RETRANSSEGS); |
| 1424 | |
| 1425 | tp->total_retrans++; |
| 1426 | |
| 1427 | #if FASTRETRANS_DEBUG > 0 |
| 1428 | if (TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS) { |
| 1429 | if (net_ratelimit()) |
| 1430 | printk(KERN_DEBUG "retrans_out leaked.\n"); |
| 1431 | } |
| 1432 | #endif |
| 1433 | TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; |
| 1434 | tp->retrans_out += tcp_skb_pcount(skb); |
| 1435 | |
| 1436 | /* Save stamp of the first retransmit. */ |
| 1437 | if (!tp->retrans_stamp) |
| 1438 | tp->retrans_stamp = TCP_SKB_CB(skb)->when; |
| 1439 | |
| 1440 | tp->undo_retrans++; |
| 1441 | |
| 1442 | /* snd_nxt is stored to detect loss of retransmitted segment, |
| 1443 | * see tcp_input.c tcp_sacktag_write_queue(). |
| 1444 | */ |
| 1445 | TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; |
| 1446 | } |
| 1447 | return err; |
| 1448 | } |
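/* Illustration (not part of the kernel source): the send-buffer gate at the
 * top of tcp_retransmit_skb() above, as a stand-alone sketch. Plain
 * userspace C; the demo_* names are hypothetical.
 */
#include <assert.h>
#include <stdbool.h>

/* Allow the retransmit only while the bytes already allocated stay below the
 * queued bytes plus a 25% copying-overhead reserve, capped at the send buffer.
 */
static bool demo_may_retransmit(int wmem_alloc, int wmem_queued, int sndbuf)
{
	int limit = wmem_queued + (wmem_queued >> 2);	/* queued + 1/4 */

	if (limit > sndbuf)
		limit = sndbuf;
	return wmem_alloc <= limit;
}

int main(void)
{
	/* 64 KiB queued allows up to 80 KiB allocated (sndbuf permitting). */
	assert(demo_may_retransmit(70000, 65536, 131072));
	assert(!demo_may_retransmit(90000, 65536, 131072));
	return 0;
}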
| 1449 | |
| 1450 | /* This gets called after a retransmit timeout, and the initially |
| 1451 | * retransmitted data is acknowledged. It tries to continue |
| 1452 | * resending the rest of the retransmit queue, until either |
| 1453 | * we've sent it all or the congestion window limit is reached. |
| 1454 | * If doing SACK, the first ACK which comes back for a timeout |
| 1455 | * based retransmit packet might feed us FACK information again. |
| 1456 | * If so, we use it to avoid unnecessary retransmissions. |
| 1457 | */ |
| 1458 | void tcp_xmit_retransmit_queue(struct sock *sk) |
| 1459 | { |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1460 | const struct inet_connection_sock *icsk = inet_csk(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1461 | struct tcp_sock *tp = tcp_sk(sk); |
| 1462 | struct sk_buff *skb; |
| 1463 | int packet_cnt = tp->lost_out; |
| 1464 | |
| 1465 | /* First pass: retransmit lost packets. */ |
| 1466 | if (packet_cnt) { |
| 1467 | sk_stream_for_retrans_queue(skb, sk) { |
| 1468 | __u8 sacked = TCP_SKB_CB(skb)->sacked; |
| 1469 | |
| 1470 | /* Assume this retransmit will generate |
| 1471 | * only one packet for congestion window |
| 1472 | * calculation purposes. This works because |
| 1473 | * tcp_retransmit_skb() will chop up the |
| 1474 | * packet to be MSS sized and all the |
| 1475 | * packet counting works out. |
| 1476 | */ |
| 1477 | if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) |
| 1478 | return; |
| 1479 | |
| 1480 | if (sacked&TCPCB_LOST) { |
| 1481 | if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) { |
| 1482 | if (tcp_retransmit_skb(sk, skb)) |
| 1483 | return; |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1484 | if (icsk->icsk_ca_state != TCP_CA_Loss) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1485 | NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS); |
| 1486 | else |
| 1487 | NET_INC_STATS_BH(LINUX_MIB_TCPSLOWSTARTRETRANS); |
| 1488 | |
| 1489 | if (skb == |
| 1490 | skb_peek(&sk->sk_write_queue)) |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1491 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 1492 | inet_csk(sk)->icsk_rto, |
| 1493 | TCP_RTO_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1494 | } |
| 1495 | |
| 1496 | packet_cnt -= tcp_skb_pcount(skb); |
| 1497 | if (packet_cnt <= 0) |
| 1498 | break; |
| 1499 | } |
| 1500 | } |
| 1501 | } |
| 1502 | |
| 1503 | /* OK, demanded retransmission is finished. */ |
| 1504 | |
| 1505 | /* Forward retransmissions are possible only during Recovery. */ |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 1506 | if (icsk->icsk_ca_state != TCP_CA_Recovery) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1507 | return; |
| 1508 | |
| 1509 | /* No forward retransmissions in Reno are possible. */ |
| 1510 | if (!tp->rx_opt.sack_ok) |
| 1511 | return; |
| 1512 | |
| 1513 | /* Yeah, we have to make a difficult choice between forward transmission |
| 1514 | * and retransmission... Both ways have their merits... |
| 1515 | * |
| 1516 | * For now we do not retransmit anything, while we have some new |
| 1517 | * segments to send. |
| 1518 | */ |
| 1519 | |
| 1520 | if (tcp_may_send_now(sk, tp)) |
| 1521 | return; |
| 1522 | |
| 1523 | packet_cnt = 0; |
| 1524 | |
| 1525 | sk_stream_for_retrans_queue(skb, sk) { |
| 1526 | /* Similar to the retransmit loop above we |
| 1527 | * can pretend that the retransmitted SKB |
| 1528 | * we send out here will be composed of one |
| 1529 | * real MSS sized packet because tcp_retransmit_skb() |
| 1530 | * will fragment it if necessary. |
| 1531 | */ |
| 1532 | if (++packet_cnt > tp->fackets_out) |
| 1533 | break; |
| 1534 | |
| 1535 | if (tcp_packets_in_flight(tp) >= tp->snd_cwnd) |
| 1536 | break; |
| 1537 | |
| 1538 | if (TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) |
| 1539 | continue; |
| 1540 | |
| 1541 | /* Ok, retransmit it. */ |
| 1542 | if (tcp_retransmit_skb(sk, skb)) |
| 1543 | break; |
| 1544 | |
| 1545 | if (skb == skb_peek(&sk->sk_write_queue)) |
Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 1546 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
| 1547 | inet_csk(sk)->icsk_rto, |
| 1548 | TCP_RTO_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1549 | |
| 1550 | NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS); |
| 1551 | } |
| 1552 | } |
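/* Illustration (not part of the kernel source): the gating that the second
 * half of tcp_xmit_retransmit_queue() applies above, condensed into one
 * predicate. The enum and demo_* names are illustrative, not kernel
 * definitions.
 */
#include <assert.h>

enum demo_ca_state { DEMO_CA_OPEN, DEMO_CA_RECOVERY, DEMO_CA_LOSS };

static int demo_may_forward_retransmit(enum demo_ca_state state,
				       int sack_ok, int can_send_new_data)
{
	if (state != DEMO_CA_RECOVERY)
		return 0;	/* forward retransmits happen only in Recovery */
	if (!sack_ok)
		return 0;	/* Reno cannot tell which segments to skip */
	if (can_send_new_data)
		return 0;	/* prefer new data over re-sending old data */
	return 1;
}

int main(void)
{
	assert(demo_may_forward_retransmit(DEMO_CA_RECOVERY, 1, 0));
	assert(!demo_may_forward_retransmit(DEMO_CA_LOSS, 1, 0));
	assert(!demo_may_forward_retransmit(DEMO_CA_RECOVERY, 0, 0));
	return 0;
}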
| 1553 | |
| 1554 | |
| 1555 | /* Send a FIN. The caller locks the socket for us. This cannot be |
| 1556 | * allowed to fail queueing a FIN frame under any circumstances. |
| 1557 | */ |
| 1558 | void tcp_send_fin(struct sock *sk) |
| 1559 | { |
| 1560 | struct tcp_sock *tp = tcp_sk(sk); |
| 1561 | struct sk_buff *skb = skb_peek_tail(&sk->sk_write_queue); |
| 1562 | int mss_now; |
| 1563 | |
| 1564 | /* Optimization, tack on the FIN if we have a queue of |
| 1565 | * unsent frames. But be careful about outgoing SACKS |
| 1566 | * and IP options. |
| 1567 | */ |
| 1568 | mss_now = tcp_current_mss(sk, 1); |
| 1569 | |
| 1570 | if (sk->sk_send_head != NULL) { |
| 1571 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN; |
| 1572 | TCP_SKB_CB(skb)->end_seq++; |
| 1573 | tp->write_seq++; |
| 1574 | } else { |
| 1575 | /* Socket is locked, keep trying until memory is available. */ |
| 1576 | for (;;) { |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 1577 | skb = alloc_skb_fclone(MAX_TCP_HEADER, GFP_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1578 | if (skb) |
| 1579 | break; |
| 1580 | yield(); |
| 1581 | } |
| 1582 | |
| 1583 | /* Reserve space for headers and prepare control bits. */ |
| 1584 | skb_reserve(skb, MAX_TCP_HEADER); |
| 1585 | skb->csum = 0; |
| 1586 | TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN); |
| 1587 | TCP_SKB_CB(skb)->sacked = 0; |
| 1588 | skb_shinfo(skb)->tso_segs = 1; |
| 1589 | skb_shinfo(skb)->tso_size = 0; |
| 1590 | |
| 1591 | /* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */ |
| 1592 | TCP_SKB_CB(skb)->seq = tp->write_seq; |
| 1593 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; |
| 1594 | tcp_queue_skb(sk, skb); |
| 1595 | } |
| 1596 | __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); |
| 1597 | } |
| 1598 | |
| 1599 | /* We get here when a process closes a file descriptor (either due to |
| 1600 | * an explicit close() or as a byproduct of exit()'ing) and there |
| 1601 | * was unread data in the receive queue. This behavior is recommended |
| 1602 | * by draft-ietf-tcpimpl-prob-03.txt section 3.10. -DaveM |
| 1603 | */ |
Victor Fusco | 86a76ca | 2005-07-08 14:57:47 -0700 | [diff] [blame] | 1604 | void tcp_send_active_reset(struct sock *sk, unsigned int __nocast priority) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | { |
| 1606 | struct tcp_sock *tp = tcp_sk(sk); |
| 1607 | struct sk_buff *skb; |
| 1608 | |
| 1609 | /* NOTE: No TCP options attached and we never retransmit this. */ |
| 1610 | skb = alloc_skb(MAX_TCP_HEADER, priority); |
| 1611 | if (!skb) { |
| 1612 | NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); |
| 1613 | return; |
| 1614 | } |
| 1615 | |
| 1616 | /* Reserve space for headers and prepare control bits. */ |
| 1617 | skb_reserve(skb, MAX_TCP_HEADER); |
| 1618 | skb->csum = 0; |
| 1619 | TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST); |
| 1620 | TCP_SKB_CB(skb)->sacked = 0; |
| 1621 | skb_shinfo(skb)->tso_segs = 1; |
| 1622 | skb_shinfo(skb)->tso_size = 0; |
| 1623 | |
| 1624 | /* Send it off. */ |
| 1625 | TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); |
| 1626 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; |
| 1627 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
| 1628 | if (tcp_transmit_skb(sk, skb)) |
| 1629 | NET_INC_STATS(LINUX_MIB_TCPABORTFAILED); |
| 1630 | } |
| 1631 | |
| 1632 | /* WARNING: This routine must only be called when we have already sent |
| 1633 | * a SYN packet that crossed the incoming SYN that caused this routine |
| 1634 | * to get called. If this assumption fails then the initial rcv_wnd |
| 1635 | * and rcv_wscale values will not be correct. |
| 1636 | */ |
| 1637 | int tcp_send_synack(struct sock *sk) |
| 1638 | { |
| 1639 | struct sk_buff* skb; |
| 1640 | |
| 1641 | skb = skb_peek(&sk->sk_write_queue); |
| 1642 | if (skb == NULL || !(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_SYN)) { |
| 1643 | printk(KERN_DEBUG "tcp_send_synack: wrong queue state\n"); |
| 1644 | return -EFAULT; |
| 1645 | } |
| 1646 | if (!(TCP_SKB_CB(skb)->flags&TCPCB_FLAG_ACK)) { |
| 1647 | if (skb_cloned(skb)) { |
| 1648 | struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); |
| 1649 | if (nskb == NULL) |
| 1650 | return -ENOMEM; |
| 1651 | __skb_unlink(skb, &sk->sk_write_queue); |
| 1652 | skb_header_release(nskb); |
| 1653 | __skb_queue_head(&sk->sk_write_queue, nskb); |
| 1654 | sk_stream_free_skb(sk, skb); |
| 1655 | sk_charge_skb(sk, nskb); |
| 1656 | skb = nskb; |
| 1657 | } |
| 1658 | |
| 1659 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ACK; |
| 1660 | TCP_ECN_send_synack(tcp_sk(sk), skb); |
| 1661 | } |
| 1662 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
| 1663 | return tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)); |
| 1664 | } |
| 1665 | |
| 1666 | /* |
| 1667 | * Prepare a SYN-ACK. |
| 1668 | */ |
| 1669 | struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst, |
Arnaldo Carvalho de Melo | 60236fd | 2005-06-18 22:47:21 -0700 | [diff] [blame] | 1670 | struct request_sock *req) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1671 | { |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1672 | struct inet_request_sock *ireq = inet_rsk(req); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1673 | struct tcp_sock *tp = tcp_sk(sk); |
| 1674 | struct tcphdr *th; |
| 1675 | int tcp_header_size; |
| 1676 | struct sk_buff *skb; |
| 1677 | |
| 1678 | skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); |
| 1679 | if (skb == NULL) |
| 1680 | return NULL; |
| 1681 | |
| 1682 | /* Reserve space for headers. */ |
| 1683 | skb_reserve(skb, MAX_TCP_HEADER); |
| 1684 | |
| 1685 | skb->dst = dst_clone(dst); |
| 1686 | |
| 1687 | tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS + |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1688 | (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) + |
| 1689 | (ireq->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) + |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1690 | /* SACK_PERM is in the place of NOP NOP of TS */ |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1691 | ((ireq->sack_ok && !ireq->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1692 | skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size); |
| 1693 | |
| 1694 | memset(th, 0, sizeof(struct tcphdr)); |
| 1695 | th->syn = 1; |
| 1696 | th->ack = 1; |
| 1697 | if (dst->dev->features&NETIF_F_TSO) |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1698 | ireq->ecn_ok = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1699 | TCP_ECN_make_synack(req, th); |
| 1700 | th->source = inet_sk(sk)->sport; |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1701 | th->dest = ireq->rmt_port; |
| 1702 | TCP_SKB_CB(skb)->seq = tcp_rsk(req)->snt_isn; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1703 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; |
| 1704 | TCP_SKB_CB(skb)->sacked = 0; |
| 1705 | skb_shinfo(skb)->tso_segs = 1; |
| 1706 | skb_shinfo(skb)->tso_size = 0; |
| 1707 | th->seq = htonl(TCP_SKB_CB(skb)->seq); |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1708 | th->ack_seq = htonl(tcp_rsk(req)->rcv_isn + 1); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1709 | if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */ |
| 1710 | __u8 rcv_wscale; |
| 1711 | /* Set this up on the first call only */ |
| 1712 | req->window_clamp = tp->window_clamp ? : dst_metric(dst, RTAX_WINDOW); |
| 1713 | /* tcp_full_space because it is guaranteed to be the first packet */ |
| 1714 | tcp_select_initial_window(tcp_full_space(sk), |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1715 | dst_metric(dst, RTAX_ADVMSS) - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0), |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1716 | &req->rcv_wnd, |
| 1717 | &req->window_clamp, |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1718 | ireq->wscale_ok, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1719 | &rcv_wscale); |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1720 | ireq->rcv_wscale = rcv_wscale; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1721 | } |
| 1722 | |
| 1723 | /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */ |
| 1724 | th->window = htons(req->rcv_wnd); |
| 1725 | |
| 1726 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
Arnaldo Carvalho de Melo | 2e6599c | 2005-06-18 22:46:52 -0700 | [diff] [blame] | 1727 | tcp_syn_build_options((__u32 *)(th + 1), dst_metric(dst, RTAX_ADVMSS), ireq->tstamp_ok, |
| 1728 | ireq->sack_ok, ireq->wscale_ok, ireq->rcv_wscale, |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1729 | TCP_SKB_CB(skb)->when, |
| 1730 | req->ts_recent); |
| 1731 | |
| 1732 | skb->csum = 0; |
| 1733 | th->doff = (tcp_header_size >> 2); |
| 1734 | TCP_INC_STATS(TCP_MIB_OUTSEGS); |
| 1735 | return skb; |
| 1736 | } |
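/* Illustration (not part of the kernel source): a worked example of the
 * SYN-ACK header sizing in tcp_make_synack() above. The DEMO_OLEN_* values
 * mirror the TCPOLEN_* constants in net/tcp.h at the time of writing; the
 * demo_* names are hypothetical. Plain userspace C.
 */
#include <assert.h>

#define DEMO_TCPHDR			20	/* sizeof(struct tcphdr) */
#define DEMO_OLEN_MSS			 4
#define DEMO_OLEN_TSTAMP_ALIGNED	12
#define DEMO_OLEN_WSCALE_ALIGNED	 4
#define DEMO_OLEN_SACKPERM_ALIGNED	 4

static int demo_synack_header_size(int tstamp_ok, int wscale_ok, int sack_ok)
{
	return DEMO_TCPHDR + DEMO_OLEN_MSS +
	       (tstamp_ok ? DEMO_OLEN_TSTAMP_ALIGNED : 0) +
	       (wscale_ok ? DEMO_OLEN_WSCALE_ALIGNED : 0) +
	       /* SACK_PERM rides in the NOP/NOP slot of the timestamp option */
	       ((sack_ok && !tstamp_ok) ? DEMO_OLEN_SACKPERM_ALIGNED : 0);
}

int main(void)
{
	/* Timestamps + window scaling + SACK: 20 + 4 + 12 + 4 = 40 bytes,
	 * i.e. th->doff = 40 >> 2 = 10 32-bit words.
	 */
	assert(demo_synack_header_size(1, 1, 1) == 40);
	/* SACK without timestamps pays its own 4 aligned bytes: 20+4+4+4. */
	assert(demo_synack_header_size(0, 1, 1) == 32);
	return 0;
}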
| 1737 | |
| 1738 | /* |
| 1739 | * Do all connect socket setups that can be done AF independent. |
| 1740 | */ |
| 1741 | static inline void tcp_connect_init(struct sock *sk) |
| 1742 | { |
| 1743 | struct dst_entry *dst = __sk_dst_get(sk); |
| 1744 | struct tcp_sock *tp = tcp_sk(sk); |
| 1745 | __u8 rcv_wscale; |
| 1746 | |
| 1747 | /* We'll fix this up when we get a response from the other end. |
| 1748 | * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT. |
| 1749 | */ |
| 1750 | tp->tcp_header_len = sizeof(struct tcphdr) + |
| 1751 | (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0); |
| 1752 | |
| 1753 | /* If user gave his TCP_MAXSEG, record it to clamp */ |
| 1754 | if (tp->rx_opt.user_mss) |
| 1755 | tp->rx_opt.mss_clamp = tp->rx_opt.user_mss; |
| 1756 | tp->max_window = 0; |
| 1757 | tcp_sync_mss(sk, dst_mtu(dst)); |
| 1758 | |
| 1759 | if (!tp->window_clamp) |
| 1760 | tp->window_clamp = dst_metric(dst, RTAX_WINDOW); |
| 1761 | tp->advmss = dst_metric(dst, RTAX_ADVMSS); |
| 1762 | tcp_initialize_rcv_mss(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1763 | |
| 1764 | tcp_select_initial_window(tcp_full_space(sk), |
| 1765 | tp->advmss - (tp->rx_opt.ts_recent_stamp ? tp->tcp_header_len - sizeof(struct tcphdr) : 0), |
| 1766 | &tp->rcv_wnd, |
| 1767 | &tp->window_clamp, |
| 1768 | sysctl_tcp_window_scaling, |
| 1769 | &rcv_wscale); |
| 1770 | |
| 1771 | tp->rx_opt.rcv_wscale = rcv_wscale; |
| 1772 | tp->rcv_ssthresh = tp->rcv_wnd; |
| 1773 | |
| 1774 | sk->sk_err = 0; |
| 1775 | sock_reset_flag(sk, SOCK_DONE); |
| 1776 | tp->snd_wnd = 0; |
| 1777 | tcp_init_wl(tp, tp->write_seq, 0); |
| 1778 | tp->snd_una = tp->write_seq; |
| 1779 | tp->snd_sml = tp->write_seq; |
| 1780 | tp->rcv_nxt = 0; |
| 1781 | tp->rcv_wup = 0; |
| 1782 | tp->copied_seq = 0; |
| 1783 | |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1784 | inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; |
| 1785 | inet_csk(sk)->icsk_retransmits = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1786 | tcp_clear_retrans(tp); |
| 1787 | } |
| 1788 | |
| 1789 | /* |
| 1790 | * Build a SYN and send it off. |
| 1791 | */ |
| 1792 | int tcp_connect(struct sock *sk) |
| 1793 | { |
| 1794 | struct tcp_sock *tp = tcp_sk(sk); |
| 1795 | struct sk_buff *buff; |
| 1796 | |
| 1797 | tcp_connect_init(sk); |
| 1798 | |
David S. Miller | d179cd1 | 2005-08-17 14:57:30 -0700 | [diff] [blame] | 1799 | buff = alloc_skb_fclone(MAX_TCP_HEADER + 15, sk->sk_allocation); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1800 | if (unlikely(buff == NULL)) |
| 1801 | return -ENOBUFS; |
| 1802 | |
| 1803 | /* Reserve space for headers. */ |
| 1804 | skb_reserve(buff, MAX_TCP_HEADER); |
| 1805 | |
| 1806 | TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; |
| 1807 | TCP_ECN_send_syn(sk, tp, buff); |
| 1808 | TCP_SKB_CB(buff)->sacked = 0; |
| 1809 | skb_shinfo(buff)->tso_segs = 1; |
| 1810 | skb_shinfo(buff)->tso_size = 0; |
| 1811 | buff->csum = 0; |
| 1812 | TCP_SKB_CB(buff)->seq = tp->write_seq++; |
| 1813 | TCP_SKB_CB(buff)->end_seq = tp->write_seq; |
| 1814 | tp->snd_nxt = tp->write_seq; |
| 1815 | tp->pushed_seq = tp->write_seq; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1816 | |
| 1817 | /* Send it off. */ |
| 1818 | TCP_SKB_CB(buff)->when = tcp_time_stamp; |
| 1819 | tp->retrans_stamp = TCP_SKB_CB(buff)->when; |
| 1820 | skb_header_release(buff); |
| 1821 | __skb_queue_tail(&sk->sk_write_queue, buff); |
| 1822 | sk_charge_skb(sk, buff); |
| 1823 | tp->packets_out += tcp_skb_pcount(buff); |
| 1824 | tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL)); |
| 1825 | TCP_INC_STATS(TCP_MIB_ACTIVEOPENS); |
| 1826 | |
| 1827 | /* Timer for repeating the SYN until an answer. */ |
Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 1828 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, |
| 1829 | inet_csk(sk)->icsk_rto, TCP_RTO_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1830 | return 0; |
| 1831 | } |
| 1832 | |
| 1833 | /* Send out a delayed ack, the caller does the policy checking |
| 1834 | * to see if we should even be here. See tcp_input.c:tcp_ack_snd_check() |
| 1835 | * for details. |
| 1836 | */ |
| 1837 | void tcp_send_delayed_ack(struct sock *sk) |
| 1838 | { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1839 | struct inet_connection_sock *icsk = inet_csk(sk); |
| 1840 | int ato = icsk->icsk_ack.ato; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1841 | unsigned long timeout; |
| 1842 | |
| 1843 | if (ato > TCP_DELACK_MIN) { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1844 | const struct tcp_sock *tp = tcp_sk(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | int max_ato = HZ/2; |
| 1846 | |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1847 | if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1848 | max_ato = TCP_DELACK_MAX; |
| 1849 | |
| 1850 | /* Slow path, intersegment interval is "high". */ |
| 1851 | |
| 1852 | /* If some rtt estimate is known, use it to bound delayed ack. |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1853 | * Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1854 | * directly. |
| 1855 | */ |
| 1856 | if (tp->srtt) { |
| 1857 | int rtt = max(tp->srtt>>3, TCP_DELACK_MIN); |
| 1858 | |
| 1859 | if (rtt < max_ato) |
| 1860 | max_ato = rtt; |
| 1861 | } |
| 1862 | |
| 1863 | ato = min(ato, max_ato); |
| 1864 | } |
| 1865 | |
| 1866 | /* Stay within the limit we were given */ |
| 1867 | timeout = jiffies + ato; |
| 1868 | |
| 1869 | /* Use new timeout only if there wasn't an older one earlier. */ |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1870 | if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1871 | /* If delack timer was blocked or is about to expire, |
| 1872 | * send ACK now. |
| 1873 | */ |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1874 | if (icsk->icsk_ack.blocked || |
| 1875 | time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | tcp_send_ack(sk); |
| 1877 | return; |
| 1878 | } |
| 1879 | |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1880 | if (!time_before(timeout, icsk->icsk_ack.timeout)) |
| 1881 | timeout = icsk->icsk_ack.timeout; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1882 | } |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1883 | icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER; |
| 1884 | icsk->icsk_ack.timeout = timeout; |
| 1885 | sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1886 | } |
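/* Illustration (not part of the kernel source): the delayed-ACK bound that
 * tcp_send_delayed_ack() computes above, as a stand-alone sketch. The
 * DEMO_DELACK_* values mirror TCP_DELACK_MIN/MAX (HZ/25 and HZ/5 in
 * net/tcp.h at the time of writing); HZ = 1000 is assumed and the demo_*
 * names are hypothetical.
 */
#include <assert.h>

#define DEMO_HZ		1000
#define DEMO_DELACK_MIN	(DEMO_HZ / 25)	/* 40 ms */
#define DEMO_DELACK_MAX	(DEMO_HZ / 5)	/* 200 ms */

static int demo_bound_ato(int ato, int srtt, int pingpong)
{
	int max_ato = pingpong ? DEMO_DELACK_MAX : DEMO_HZ / 2;

	if (srtt) {
		int rtt = srtt >> 3;		/* srtt is stored << 3 */

		if (rtt < DEMO_DELACK_MIN)
			rtt = DEMO_DELACK_MIN;
		if (rtt < max_ato)
			max_ato = rtt;
	}
	return ato < max_ato ? ato : max_ato;
}

int main(void)
{
	/* A 100 ms smoothed RTT caps a 300 ms ato down to 100 ms. */
	assert(demo_bound_ato(300, 100 << 3, 0) == 100);
	/* A very small RTT is floored at DELACK_MIN (40 ms here). */
	assert(demo_bound_ato(300, 5 << 3, 0) == 40);
	return 0;
}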
| 1887 | |
| 1888 | /* This routine sends an ack and also updates the window. */ |
| 1889 | void tcp_send_ack(struct sock *sk) |
| 1890 | { |
| 1891 | /* If we have been reset, we may not send again. */ |
| 1892 | if (sk->sk_state != TCP_CLOSE) { |
| 1893 | struct tcp_sock *tp = tcp_sk(sk); |
| 1894 | struct sk_buff *buff; |
| 1895 | |
| 1896 | /* We are not putting this on the write queue, so |
| 1897 | * tcp_transmit_skb() will set the ownership to this |
| 1898 | * sock. |
| 1899 | */ |
| 1900 | buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); |
| 1901 | if (buff == NULL) { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 1902 | inet_csk_schedule_ack(sk); |
| 1903 | inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN; |
Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 1904 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, |
| 1905 | TCP_DELACK_MAX, TCP_RTO_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1906 | return; |
| 1907 | } |
| 1908 | |
| 1909 | /* Reserve space for headers and prepare control bits. */ |
| 1910 | skb_reserve(buff, MAX_TCP_HEADER); |
| 1911 | buff->csum = 0; |
| 1912 | TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK; |
| 1913 | TCP_SKB_CB(buff)->sacked = 0; |
| 1914 | skb_shinfo(buff)->tso_segs = 1; |
| 1915 | skb_shinfo(buff)->tso_size = 0; |
| 1916 | |
| 1917 | /* Send it off, this clears delayed acks for us. */ |
| 1918 | TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); |
| 1919 | TCP_SKB_CB(buff)->when = tcp_time_stamp; |
| 1920 | tcp_transmit_skb(sk, buff); |
| 1921 | } |
| 1922 | } |
| 1923 | |
| 1924 | /* This routine sends a packet with an out of date sequence |
| 1925 | * number. It assumes the other end will try to ack it. |
| 1926 | * |
| 1927 | * Question: what should we do while in urgent mode? |
| 1928 | * 4.4BSD forces sending a single byte of data. We cannot send |
| 1929 | * out-of-window data, because we have SND.NXT==SND.MAX... |
| 1930 | * |
| 1931 | * Current solution: to send TWO zero-length segments in urgent mode: |
| 1932 | * one is with SEG.SEQ=SND.UNA to deliver urgent pointer, another is |
| 1933 | * out-of-date with SND.UNA-1 to probe window. |
| 1934 | */ |
| 1935 | static int tcp_xmit_probe_skb(struct sock *sk, int urgent) |
| 1936 | { |
| 1937 | struct tcp_sock *tp = tcp_sk(sk); |
| 1938 | struct sk_buff *skb; |
| 1939 | |
| 1940 | /* We don't queue it, tcp_transmit_skb() sets ownership. */ |
| 1941 | skb = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC); |
| 1942 | if (skb == NULL) |
| 1943 | return -1; |
| 1944 | |
| 1945 | /* Reserve space for headers and set control bits. */ |
| 1946 | skb_reserve(skb, MAX_TCP_HEADER); |
| 1947 | skb->csum = 0; |
| 1948 | TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK; |
| 1949 | TCP_SKB_CB(skb)->sacked = urgent; |
| 1950 | skb_shinfo(skb)->tso_segs = 1; |
| 1951 | skb_shinfo(skb)->tso_size = 0; |
| 1952 | |
| 1953 | /* Use a previous sequence. This should cause the other |
| 1954 | * end to send an ack. Don't queue or clone SKB, just |
| 1955 | * send it. |
| 1956 | */ |
| 1957 | TCP_SKB_CB(skb)->seq = urgent ? tp->snd_una : tp->snd_una - 1; |
| 1958 | TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; |
| 1959 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
| 1960 | return tcp_transmit_skb(sk, skb); |
| 1961 | } |
| 1962 | |
| 1963 | int tcp_write_wakeup(struct sock *sk) |
| 1964 | { |
| 1965 | if (sk->sk_state != TCP_CLOSE) { |
| 1966 | struct tcp_sock *tp = tcp_sk(sk); |
| 1967 | struct sk_buff *skb; |
| 1968 | |
| 1969 | if ((skb = sk->sk_send_head) != NULL && |
| 1970 | before(TCP_SKB_CB(skb)->seq, tp->snd_una+tp->snd_wnd)) { |
| 1971 | int err; |
| 1972 | unsigned int mss = tcp_current_mss(sk, 0); |
| 1973 | unsigned int seg_size = tp->snd_una+tp->snd_wnd-TCP_SKB_CB(skb)->seq; |
| 1974 | |
| 1975 | if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) |
| 1976 | tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; |
| 1977 | |
| 1978 | /* We are probing the opening of a window |
| 1979 | * but the window size is != 0; this must have |
| 1980 | * been the result of sender-side SWS avoidance. |
| 1981 | */ |
| 1982 | if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || |
| 1983 | skb->len > mss) { |
| 1984 | seg_size = min(seg_size, mss); |
| 1985 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; |
David S. Miller | 846998a | 2005-08-04 19:52:01 -0700 | [diff] [blame] | 1986 | if (tcp_fragment(sk, skb, seg_size, mss)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1987 | return -1; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1988 | } else if (!tcp_skb_pcount(skb)) |
David S. Miller | 846998a | 2005-08-04 19:52:01 -0700 | [diff] [blame] | 1989 | tcp_set_skb_tso_segs(sk, skb, mss); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1990 | |
| 1991 | TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH; |
| 1992 | TCP_SKB_CB(skb)->when = tcp_time_stamp; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1993 | err = tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC)); |
| 1994 | if (!err) { |
| 1995 | update_send_head(sk, tp, skb); |
| 1996 | } |
| 1997 | return err; |
| 1998 | } else { |
| 1999 | if (tp->urg_mode && |
| 2000 | between(tp->snd_up, tp->snd_una+1, tp->snd_una+0xFFFF)) |
| 2001 | tcp_xmit_probe_skb(sk, TCPCB_URG); |
| 2002 | return tcp_xmit_probe_skb(sk, 0); |
| 2003 | } |
| 2004 | } |
| 2005 | return -1; |
| 2006 | } |
| 2007 | |
| 2008 | /* A window probe timeout has occurred. If the window is not closed, |
| 2009 | * send a partial packet, else a zero window probe. |
| 2010 | */ |
| 2011 | void tcp_send_probe0(struct sock *sk) |
| 2012 | { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 2013 | struct inet_connection_sock *icsk = inet_csk(sk); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2014 | struct tcp_sock *tp = tcp_sk(sk); |
| 2015 | int err; |
| 2016 | |
| 2017 | err = tcp_write_wakeup(sk); |
| 2018 | |
| 2019 | if (tp->packets_out || !sk->sk_send_head) { |
| 2020 | /* Cancel probe timer, if it is not required. */ |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 2021 | icsk->icsk_probes_out = 0; |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 2022 | icsk->icsk_backoff = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2023 | return; |
| 2024 | } |
| 2025 | |
| 2026 | if (err <= 0) { |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 2027 | if (icsk->icsk_backoff < sysctl_tcp_retries2) |
| 2028 | icsk->icsk_backoff++; |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 2029 | icsk->icsk_probes_out++; |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 2030 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 2031 | min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX), |
| 2032 | TCP_RTO_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2033 | } else { |
| 2034 | /* If packet was not sent due to local congestion, |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 2035 | * do not backoff and do not remember icsk_probes_out. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2036 | * Let local senders fight for local resources. |
| 2037 | * |
| 2038 | * Still use the accumulated backoff, though. |
| 2039 | */ |
Arnaldo Carvalho de Melo | 6687e98 | 2005-08-10 04:03:31 -0300 | [diff] [blame] | 2040 | if (!icsk->icsk_probes_out) |
| 2041 | icsk->icsk_probes_out = 1; |
Arnaldo Carvalho de Melo | 463c84b | 2005-08-09 20:10:42 -0700 | [diff] [blame] | 2042 | inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, |
| 2043 | min(icsk->icsk_rto << icsk->icsk_backoff, |
Arnaldo Carvalho de Melo | 3f421ba | 2005-08-09 20:11:08 -0700 | [diff] [blame] | 2044 | TCP_RESOURCE_PROBE_INTERVAL), |
| 2045 | TCP_RTO_MAX); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2046 | } |
| 2047 | } |
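/* Illustration (not part of the kernel source): the zero-window-probe timer
 * arithmetic used by tcp_send_probe0() above. Values are in milliseconds
 * assuming HZ = 1000; TCP_RTO_MAX is 120 * HZ in net/tcp.h at the time of
 * writing. The demo_* names are hypothetical.
 */
#include <assert.h>

#define DEMO_RTO_MAX	(120 * 1000)	/* 120 s cap on any timer */

static unsigned int demo_probe0_timeout(unsigned int rto, int backoff)
{
	unsigned int timeout = rto << backoff;	/* doubles per failed probe */

	return timeout < DEMO_RTO_MAX ? timeout : DEMO_RTO_MAX;
}

int main(void)
{
	/* A 200 ms RTO has backed off to 1.6 s after three failed probes... */
	assert(demo_probe0_timeout(200, 3) == 1600);
	/* ...and is clamped at 120 s once the shift would exceed the cap. */
	assert(demo_probe0_timeout(200, 12) == DEMO_RTO_MAX);
	return 0;
}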
| 2048 | |
| 2049 | EXPORT_SYMBOL(tcp_connect); |
| 2050 | EXPORT_SYMBOL(tcp_make_synack); |
| 2051 | EXPORT_SYMBOL(tcp_simple_retransmit); |
| 2052 | EXPORT_SYMBOL(tcp_sync_mss); |