/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken,
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code, obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything, not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive, otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's; for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making it impossible to
	 * poll() on write() in state CLOSE_WAIT. One solution is evident
	 * --- to set POLLHUP if and only if shutdown has been made in
	 * both directions. Actually, it is interesting to look how
	 * Solaris and DUX solve this dilemma. I would prefer, if POLLHUP
	 * were maskable, then we could set it on SND_SHUTDOWN. BTW the
	 * examples given in Stevens' books assume exactly this behaviour,
	 * which explains why POLLHUP is incompatible with POLLOUT. --ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
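
/*
 * Usage sketch (illustrative only, not compiled as part of this file):
 * the mask computed above is what a poll(2) caller observes. "tcp_fd"
 * is an assumed, already-connected TCP socket descriptor.
 *
 *	struct pollfd pfd = { .fd = tcp_fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & POLLIN)   ... data or a FIN is readable
 *		if (pfd.revents & POLLOUT)  ... wspace >= min_wspace above
 *		if (pfd.revents & POLLHUP)  ... both directions shut down
 *	}
 */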

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
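
/*
 * Usage sketch (illustrative only; "tcp_fd" is an assumed connected
 * socket): SIOCINQ and SIOCOUTQ map onto the answers computed above.
 *
 *	int inq, outq;
 *	ioctl(tcp_fd, SIOCINQ, &inq);	... unread bytes: rcv_nxt - copied_seq
 *	ioctl(tcp_fd, SIOCOUTQ, &outq);	... unacked bytes: write_seq - snd_una
 */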

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}
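
/*
 * Worked example of the heuristic above: if the largest window the peer
 * has ever advertised (max_window) is 64KB and pushed_seq was last set
 * at sequence S, forced_push() stays false until write_seq moves past
 * S + 32KB; only then is a PSH forced so the pipe gets flushed.
 */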

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk_charge_skb(sk, skb);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
			(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_pskb(sk, 0, 0,
						   sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_stream_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk->sk_forward_alloc -= copy;
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
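
/*
 * Illustrative call path (a rough sketch, not a complete trace): a
 * sendfile(2) with a TCP socket as the output descriptor ends up here,
 * e.g. sendfile(tcp_fd, file_fd, &off, count) -> ... -> sock_sendpage()
 * -> tcp_sendpage(). With SG and checksum offload it takes the
 * zero-copy do_tcp_sendpages() path above (page references only);
 * otherwise it falls back to sock_no_sendpage(), which copies.
 */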

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok, commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate a new segment. If the interface
				 * is SG, allocate an skb fitting a single
				 * page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_pskb(sk, select_size(sk),
							   0, sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_stream_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_stream_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}
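
/*
 * Usage sketch (illustrative only; "tcp_fd" is an assumed connected
 * socket): an ordinary send(2) enters tcp_sendmsg() above, and MSG_MORE
 * corks the segment exactly as the (flags & MSG_MORE) tests show:
 *
 *	send(tcp_fd, hdr, hdrlen, MSG_MORE);	... queue, don't push yet
 *	send(tcp_fd, body, bodylen, 0);		... push the coalesced data
 */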

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
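
/*
 * Usage sketch (illustrative only; "tcp_fd" assumed): reading the one
 * byte of urgent data handled above. Per the BSD semantics this never
 * blocks: the call fails with EAGAIN if no urgent byte has arrived yet
 * and with EINVAL if it was already read or is being received inline.
 *
 *	char oob;
 *	ssize_t n = recv(tcp_fd, &oob, 1, MSG_OOB);
 */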

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}
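
/*
 * Worked example of the "significantly raised" test above: with
 * window_clamp at 64KB, the (not cheap) __tcp_select_window() call is
 * attempted only while the currently advertised window is at most
 * 32KB, and the window-update ACK is actually sent only if the new
 * window is at least twice the currently advertised one.
 */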

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
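
/*
 * A minimal sketch of a recv_actor callback for tcp_read_sock() (a
 * hypothetical example, shown only to illustrate the contract). It
 * must not block, and it returns how much of [offset, offset + len)
 * it consumed; the loop above stops once desc->count reaches zero:
 *
 *	static int demo_recv_actor(read_descriptor_t *desc,
 *				   struct sk_buff *skb,
 *				   unsigned int offset, size_t len)
 *	{
 *		size_t used = min_t(size_t, len, desc->count);
 *
 *		desc->count -= used;	... consume the caller's budget
 *		return used;
 *	}
 */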

/*
 *	This routine copies from a sock struct into the user buffer.
 *
 *	Technical note: in 2.3 we work on _locked_ socket, so that
 *	tricks with *seq access order and skb->users are not required.
 *	Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    __get_cpu_var(softnet_data).net_dma) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when user tries to read
					 * from never connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if
			 * it is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, current->pid);
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

	/* According to UNIX98, msg_name/msg_namelen are ignored
	 * on connected socket. I was just happy when found this 8) --ANK
	 */

	/* Clean up data we have read: This will do ACK frames. */
	tcp_cleanup_rbuf(sk, copied);

	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

out:
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;

recv_urg:
	err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
	goto out;
}
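
/*
 * Usage sketch (illustrative only; "tcp_fd" assumed): a plain blocking
 * recv(2) enters tcp_recvmsg() above with target taken from SO_RCVLOWAT
 * (1 by default), so it returns as soon as any data has been copied:
 *
 *	char buf[4096];
 *	ssize_t n = recv(tcp_fd, buf, sizeof(buf), 0);
 *	... n > 0: bytes copied, n == 0: FIN seen, n < 0: error
 */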

/*
 *	State processing on a close. This implements the state shift for
 *	sending our FIN frame. Note that we only send a FIN for some
 *	states. A shutdown() may have already sent the FIN, or we may be
 *	closed.
 */

static const unsigned char new_state[16] = {
  /* current state:        new state:      action:	*/
  /* (Invalid)		*/ TCP_CLOSE,
  /* TCP_ESTABLISHED	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT	*/ TCP_CLOSE,
  /* TCP_SYN_RECV	*/ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1	*/ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2	*/ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT	*/ TCP_CLOSE,
  /* TCP_CLOSE		*/ TCP_CLOSE,
  /* TCP_CLOSE_WAIT	*/ TCP_LAST_ACK  | TCP_ACTION_FIN,
  /* TCP_LAST_ACK	*/ TCP_LAST_ACK,
  /* TCP_LISTEN		*/ TCP_CLOSE,
  /* TCP_CLOSING	*/ TCP_CLOSING,
};

static int tcp_close_state(struct sock *sk)
{
	int next = (int)new_state[sk->sk_state];
	int ns = next & TCP_STATE_MASK;

	tcp_set_state(sk, ns);

	return next & TCP_ACTION_FIN;
}
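
/*
 * Worked example of the table above: close() on an ESTABLISHED socket
 * reads new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN,
 * so tcp_close_state() moves the socket to FIN_WAIT1 and tells the
 * caller to transmit a FIN; a close() in CLOSE_WAIT maps to
 * TCP_LAST_ACK | TCP_ACTION_FIN in the same way.
 */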
1534
1535/*
1536 * Shutdown the sending side of a connection. Much like close except
1537 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1538 */
1539
1540void tcp_shutdown(struct sock *sk, int how)
1541{
1542 /* We need to grab some memory, and put together a FIN,
1543 * and then put it into the queue to be sent.
1544 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1545 */
1546 if (!(how & SEND_SHUTDOWN))
1547 return;
1548
1549 /* If we've already sent a FIN, or it's a closed state, skip this. */
1550 if ((1 << sk->sk_state) &
1551 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1552 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1553 /* Clear out any half completed packets. FIN if needed. */
1554 if (tcp_close_state(sk))
1555 tcp_send_fin(sk);
1556 }
1557}

void tcp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;
        int data_was_unread = 0;
        int state;

        lock_sock(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == TCP_LISTEN) {
                tcp_set_state(sk, TCP_CLOSE);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /* We need to flush the recv. buffs. We do this only on the
         * descriptor close, not protocol-sourced closes, because the
         * reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
                          tcp_hdr(skb)->fin;
                data_was_unread += len;
                __kfree_skb(skb);
        }

        sk_stream_mem_reclaim(sk);

        /* As outlined in RFC 2525, section 2.17, we send a RST here because
         * data was lost. To witness the awful effects of the old behavior of
         * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
         * GET in an FTP client, suspend the process, wait for the client to
         * advertise a zero window, then kill -9 the FTP client, wheee...
         * Note: timeout is always zero in such a case.
         */
        if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, GFP_KERNEL);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
        } else if (tcp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
                 */

                /* RED-PEN. Formally speaking, we have broken the TCP state
                 * machine here. These state transitions:
                 *
                 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
                 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
                 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
                 *
                 * are legal only when the FIN has actually been sent (i.e. is
                 * in window), rather than queued out of window. Purists may
                 * object.
                 *
                 * E.g. the "RFC state" is ESTABLISHED if the Linux state is
                 * FIN-WAIT-1 but the FIN has still not been sent.
                 *
                 * The visible deviations are that we sometimes enter the
                 * time-wait state when it is not really required (harmless),
                 * and do not send active resets when the specs require them
                 * (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which look like
                 * CLOSING or LAST_ACK to Linux). Probably I have missed some
                 * more small holes.
                 * --ANK
                 */
                tcp_send_fin(sk);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);
        atomic_inc(sk->sk_prot->orphan_count);

        /* It is the last release_sock in its life. It will remove backlog. */
        release_sock(sk);

        /* Now socket is owned by kernel and we acquire BH lock
         * to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
                goto out;

        /* This is a (useful) BSD violation of the RFC. There is a
         * problem with TCP as specified in that the other end could
         * keep a socket open forever with no application left at this end.
         * We use a 3 minute timeout (about the same as BSD) then kill
         * our end. If they send after that then tough - BUT: it is long
         * enough that we won't repeat the old mistake of resetting after
         * only 4*rto, i.e. after almost no time at all.
         *
         * Nope, it was not a mistake. It is really the desired behaviour,
         * e.g. on HTTP servers, where such sockets are useless but consume
         * significant resources. Let's do it with the special
         * linger2 option. --ANK
         */

        if (sk->sk_state == TCP_FIN_WAIT2) {
                struct tcp_sock *tp = tcp_sk(sk);
                if (tp->linger2 < 0) {
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
                } else {
                        const int tmo = tcp_fin_time(sk);

                        if (tmo > TCP_TIMEWAIT_LEN) {
                                inet_csk_reset_keepalive_timer(sk,
                                                tmo - TCP_TIMEWAIT_LEN);
                        } else {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
        }
        if (sk->sk_state != TCP_CLOSE) {
                sk_stream_mem_reclaim(sk);
                if (tcp_too_many_orphans(sk,
                                atomic_read(sk->sk_prot->orphan_count))) {
                        if (net_ratelimit())
                                printk(KERN_INFO "TCP: too many orphaned "
                                       "sockets\n");
                        tcp_set_state(sk, TCP_CLOSE);
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
                }
        }

        if (sk->sk_state == TCP_CLOSE)
                inet_csk_destroy_sock(sk);
        /* Otherwise, socket is reprieved until protocol close. */

out:
        bh_unlock_sock(sk);
        local_bh_enable();
        sock_put(sk);
}
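
/* Illustrative userspace sketch (not part of this file).  The zero-linger
 * branch above is what SO_LINGER with l_onoff set and l_linger == 0
 * selects: close() then aborts the connection with a RST via the
 * disconnect path instead of the normal FIN sequence:
 *
 *        struct linger l = { .l_onoff = 1, .l_linger = 0 };
 *
 *        setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
 *        close(fd);
 */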

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
        return (1 << state) &
               (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
                TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
        struct inet_sock *inet = inet_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int err = 0;
        int old_state = sk->sk_state;

        if (old_state != TCP_CLOSE)
                tcp_set_state(sk, TCP_CLOSE);

        /* ABORT function of RFC793 */
        if (old_state == TCP_LISTEN) {
                inet_csk_listen_stop(sk);
        } else if (tcp_need_reset(old_state) ||
                   (tp->snd_nxt != tp->write_seq &&
                    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
                /* The last check adjusts for the discrepancy between the
                 * Linux and the RFC793 state machines.
                 */
                tcp_send_active_reset(sk, gfp_any());
                sk->sk_err = ECONNRESET;
        } else if (old_state == TCP_SYN_SENT)
                sk->sk_err = ECONNRESET;

        tcp_clear_xmit_timers(sk);
        __skb_queue_purge(&sk->sk_receive_queue);
        tcp_write_queue_purge(sk);
        __skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
        __skb_queue_purge(&sk->sk_async_wait_queue);
#endif

        inet->dport = 0;

        if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
                inet_reset_saddr(sk);

        sk->sk_shutdown = 0;
        sock_reset_flag(sk, SOCK_DONE);
        tp->srtt = 0;
        if ((tp->write_seq += tp->max_window + 2) == 0)
                tp->write_seq = 1;
        icsk->icsk_backoff = 0;
        tp->snd_cwnd = 2;
        icsk->icsk_probes_out = 0;
        tp->packets_out = 0;
        tp->snd_ssthresh = 0x7fffffff;
        tp->snd_cwnd_cnt = 0;
        tp->bytes_acked = 0;
        tcp_set_ca_state(sk, TCP_CA_Open);
        tcp_clear_retrans(tp);
        inet_csk_delack_init(sk);
        tcp_init_send_head(sk);
        memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
        __sk_dst_reset(sk);

        BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

        sk->sk_error_report(sk);
        return err;
}
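
/* Illustrative userspace sketch (not part of this file).  tcp_disconnect()
 * is normally reached from user space by connecting an existing socket to
 * an AF_UNSPEC address, which the inet connect paths treat as a request to
 * dissolve the association:
 *
 *        struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *
 *        connect(fd, &sa, sizeof(sa));
 */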

/*
 *      Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
                             int optname, char __user *optval, int optlen)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);
        int val;
        int err = 0;

        /* This is a string value; all the others are ints. */
        if (optname == TCP_CONGESTION) {
                char name[TCP_CA_NAME_MAX];

                if (optlen < 1)
                        return -EINVAL;

                val = strncpy_from_user(name, optval,
                                        min(TCP_CA_NAME_MAX-1, optlen));
                if (val < 0)
                        return -EFAULT;
                name[val] = 0;

                lock_sock(sk);
                err = tcp_set_congestion_control(sk, name);
                release_sock(sk);
                return err;
        }

        if (optlen < sizeof(int))
                return -EINVAL;

        if (get_user(val, (int __user *)optval))
                return -EFAULT;

        lock_sock(sk);

        switch (optname) {
        case TCP_MAXSEG:
                /* Values greater than the interface MTU won't take effect.
                 * However, at the point when this call is made we typically
                 * don't yet know which interface is going to be used.
                 */
                if (val < 8 || val > MAX_TCP_WINDOW) {
                        err = -EINVAL;
                        break;
                }
                tp->rx_opt.user_mss = val;
                break;

        case TCP_NODELAY:
                if (val) {
                        /* TCP_NODELAY is weaker than TCP_CORK, so that
                         * this option on a corked socket is remembered, but
                         * it is not activated until the cork is cleared.
                         *
                         * However, when TCP_NODELAY is set we make
                         * an explicit push, which overrides even TCP_CORK
                         * for currently queued segments.
                         */
                        tp->nonagle |= TCP_NAGLE_OFF | TCP_NAGLE_PUSH;
                        tcp_push_pending_frames(sk);
                } else {
                        tp->nonagle &= ~TCP_NAGLE_OFF;
                }
                break;

        case TCP_CORK:
                /* When set, this option tells TCP to always queue non-full
                 * frames. Later the user clears the option and we transmit
                 * any pending partial frames in the queue. This is
                 * meant to be used alongside sendfile() to get properly
                 * filled frames when the user (for example) must write
                 * out headers with a write() call first and then use
                 * sendfile() to send out the data parts. (An illustrative
                 * sketch of this pattern follows do_tcp_setsockopt().)
                 *
                 * TCP_CORK can be set together with TCP_NODELAY and it is
                 * stronger than TCP_NODELAY.
                 */
                if (val) {
                        tp->nonagle |= TCP_NAGLE_CORK;
                } else {
                        tp->nonagle &= ~TCP_NAGLE_CORK;
                        if (tp->nonagle & TCP_NAGLE_OFF)
                                tp->nonagle |= TCP_NAGLE_PUSH;
                        tcp_push_pending_frames(sk);
                }
                break;

        case TCP_KEEPIDLE:
                if (val < 1 || val > MAX_TCP_KEEPIDLE)
                        err = -EINVAL;
                else {
                        tp->keepalive_time = val * HZ;
                        if (sock_flag(sk, SOCK_KEEPOPEN) &&
                            !((1 << sk->sk_state) &
                              (TCPF_CLOSE | TCPF_LISTEN))) {
                                __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
                                if (tp->keepalive_time > elapsed)
                                        elapsed = tp->keepalive_time - elapsed;
                                else
                                        elapsed = 0;
                                inet_csk_reset_keepalive_timer(sk, elapsed);
                        }
                }
                break;
        case TCP_KEEPINTVL:
                if (val < 1 || val > MAX_TCP_KEEPINTVL)
                        err = -EINVAL;
                else
                        tp->keepalive_intvl = val * HZ;
                break;
        case TCP_KEEPCNT:
                if (val < 1 || val > MAX_TCP_KEEPCNT)
                        err = -EINVAL;
                else
                        tp->keepalive_probes = val;
                break;
        case TCP_SYNCNT:
                if (val < 1 || val > MAX_TCP_SYNCNT)
                        err = -EINVAL;
                else
                        icsk->icsk_syn_retries = val;
                break;

        case TCP_LINGER2:
                if (val < 0)
                        tp->linger2 = -1;
                else if (val > sysctl_tcp_fin_timeout / HZ)
                        tp->linger2 = 0;
                else
                        tp->linger2 = val * HZ;
                break;

        case TCP_DEFER_ACCEPT:
                icsk->icsk_accept_queue.rskq_defer_accept = 0;
                if (val > 0) {
                        /* Translate value in seconds to number of
                         * retransmits */
                        while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
                               val > ((TCP_TIMEOUT_INIT / HZ) <<
                                      icsk->icsk_accept_queue.rskq_defer_accept))
                                icsk->icsk_accept_queue.rskq_defer_accept++;
                        icsk->icsk_accept_queue.rskq_defer_accept++;
                }
                break;

        case TCP_WINDOW_CLAMP:
                if (!val) {
                        if (sk->sk_state != TCP_CLOSE) {
                                err = -EINVAL;
                                break;
                        }
                        tp->window_clamp = 0;
                } else
                        tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
                                           SOCK_MIN_RCVBUF / 2 : val;
                break;

        case TCP_QUICKACK:
                if (!val) {
                        icsk->icsk_ack.pingpong = 1;
                } else {
                        icsk->icsk_ack.pingpong = 0;
                        if ((1 << sk->sk_state) &
                            (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
                            inet_csk_ack_scheduled(sk)) {
                                icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
                                tcp_cleanup_rbuf(sk, 1);
                                if (!(val & 1))
                                        icsk->icsk_ack.pingpong = 1;
                        }
                }
                break;

#ifdef CONFIG_TCP_MD5SIG
        case TCP_MD5SIG:
                /* Read the IP->Key mappings from userspace */
                err = tp->af_specific->md5_parse(sk, optval, optlen);
                break;
#endif

        default:
                err = -ENOPROTOOPT;
                break;
        }

        release_sock(sk);
        return err;
}
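
/* Illustrative userspace sketch (not part of this file) of the header plus
 * sendfile() pattern the TCP_CORK comment above describes; hdr, hdrlen,
 * filefd and filelen are assumed to be set up by the caller:
 *
 *        int on = 1, off = 0;
 *
 *        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *        write(fd, hdr, hdrlen);                 queued, not yet pushed
 *        sendfile(fd, filefd, NULL, filelen);    fills full-sized frames
 *        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *                                                uncork: push partial frame
 */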

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
                   int optlen)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (level != SOL_TCP)
                return icsk->icsk_af_ops->setsockopt(sk, level, optname,
                                                     optval, optlen);
        return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int optlen)
{
        if (level != SOL_TCP)
                return inet_csk_compat_setsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif

/* Return information about the state of a TCP endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
        struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;

        memset(info, 0, sizeof(*info));

        info->tcpi_state = sk->sk_state;
        info->tcpi_ca_state = icsk->icsk_ca_state;
        info->tcpi_retransmits = icsk->icsk_retransmits;
        info->tcpi_probes = icsk->icsk_probes_out;
        info->tcpi_backoff = icsk->icsk_backoff;

        if (tp->rx_opt.tstamp_ok)
                info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
        if (tcp_is_sack(tp))
                info->tcpi_options |= TCPI_OPT_SACK;
        if (tp->rx_opt.wscale_ok) {
                info->tcpi_options |= TCPI_OPT_WSCALE;
                info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
                info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
        }

        if (tp->ecn_flags & TCP_ECN_OK)
                info->tcpi_options |= TCPI_OPT_ECN;

        info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
        info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
        info->tcpi_snd_mss = tp->mss_cache;
        info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

        info->tcpi_unacked = tp->packets_out;
        info->tcpi_sacked = tp->sacked_out;
        info->tcpi_lost = tp->lost_out;
        info->tcpi_retrans = tp->retrans_out;
        info->tcpi_fackets = tp->fackets_out;

        info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
        info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
        info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

        info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
        info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
        info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
        info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
        info->tcpi_snd_ssthresh = tp->snd_ssthresh;
        info->tcpi_snd_cwnd = tp->snd_cwnd;
        info->tcpi_advmss = tp->advmss;
        info->tcpi_reordering = tp->reordering;

        info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
        info->tcpi_rcv_space = tp->rcvq_space.space;

        info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
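
/* Illustrative userspace sketch (not part of this file): the usual consumer
 * of tcp_get_info() is the TCP_INFO option handled by do_tcp_getsockopt()
 * below.  tcpi_rtt is in microseconds, tcpi_snd_cwnd in segments:
 *
 *        struct tcp_info ti;
 *        socklen_t len = sizeof(ti);
 *
 *        if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
 *                printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */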

static int do_tcp_getsockopt(struct sock *sk, int level,
                             int optname, char __user *optval, int __user *optlen)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int val, len;

        if (get_user(len, optlen))
                return -EFAULT;

        len = min_t(unsigned int, len, sizeof(int));

        if (len < 0)
                return -EINVAL;

        switch (optname) {
        case TCP_MAXSEG:
                val = tp->mss_cache;
                if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
                        val = tp->rx_opt.user_mss;
                break;
        case TCP_NODELAY:
                val = !!(tp->nonagle & TCP_NAGLE_OFF);
                break;
        case TCP_CORK:
                val = !!(tp->nonagle & TCP_NAGLE_CORK);
                break;
        case TCP_KEEPIDLE:
                val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
                break;
        case TCP_KEEPINTVL:
                val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
                break;
        case TCP_KEEPCNT:
                val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
                break;
        case TCP_SYNCNT:
                val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
                break;
        case TCP_LINGER2:
                val = tp->linger2;
                if (val >= 0)
                        val = (val ? : sysctl_tcp_fin_timeout) / HZ;
                break;
        case TCP_DEFER_ACCEPT:
                val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
                      ((TCP_TIMEOUT_INIT / HZ) <<
                       (icsk->icsk_accept_queue.rskq_defer_accept - 1));
                break;
        case TCP_WINDOW_CLAMP:
                val = tp->window_clamp;
                break;
        case TCP_INFO: {
                struct tcp_info info;

                if (get_user(len, optlen))
                        return -EFAULT;

                tcp_get_info(sk, &info);

                len = min_t(unsigned int, len, sizeof(info));
                if (put_user(len, optlen))
                        return -EFAULT;
                if (copy_to_user(optval, &info, len))
                        return -EFAULT;
                return 0;
        }
        case TCP_QUICKACK:
                val = !icsk->icsk_ack.pingpong;
                break;

        case TCP_CONGESTION:
                if (get_user(len, optlen))
                        return -EFAULT;
                len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
                if (put_user(len, optlen))
                        return -EFAULT;
                if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
                        return -EFAULT;
                return 0;
        default:
                return -ENOPROTOOPT;
        }

        if (put_user(len, optlen))
                return -EFAULT;
        if (copy_to_user(optval, &val, len))
                return -EFAULT;
        return 0;
}

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
                   int __user *optlen)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (level != SOL_TCP)
                return icsk->icsk_af_ops->getsockopt(sk, level, optname,
                                                     optval, optlen);
        return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen)
{
        if (level != SOL_TCP)
                return inet_csk_compat_getsockopt(sk, level, optname,
                                                  optval, optlen);
        return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
        struct sk_buff *segs = ERR_PTR(-EINVAL);
        struct tcphdr *th;
        unsigned int thlen;
        unsigned int seq;
        __be32 delta;
        unsigned int oldlen;
        unsigned int len;

        if (!pskb_may_pull(skb, sizeof(*th)))
                goto out;

        th = tcp_hdr(skb);
        thlen = th->doff * 4;
        if (thlen < sizeof(*th))
                goto out;

        if (!pskb_may_pull(skb, thlen))
                goto out;

        oldlen = (u16)~skb->len;
        __skb_pull(skb, thlen);

        if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
                /* Packet is from an untrusted source, reset gso_segs. */
                int type = skb_shinfo(skb)->gso_type;
                int mss;

                if (unlikely(type &
                             ~(SKB_GSO_TCPV4 |
                               SKB_GSO_DODGY |
                               SKB_GSO_TCP_ECN |
                               SKB_GSO_TCPV6 |
                               0) ||
                             !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
                        goto out;

                mss = skb_shinfo(skb)->gso_size;
                skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;

                segs = NULL;
                goto out;
        }

        segs = skb_segment(skb, features);
        if (IS_ERR(segs))
                goto out;

        len = skb_shinfo(skb)->gso_size;
        delta = htonl(oldlen + (thlen + len));

        skb = segs;
        th = tcp_hdr(skb);
        seq = ntohl(th->seq);

        do {
                th->fin = th->psh = 0;

                th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                                       (__force u32)delta));
                if (skb->ip_summed != CHECKSUM_PARTIAL)
                        th->check =
                             csum_fold(csum_partial(skb_transport_header(skb),
                                                    thlen, skb->csum));

                seq += len;
                skb = skb->next;
                th = tcp_hdr(skb);

                th->seq = htonl(seq);
                th->cwr = 0;
        } while (skb->next);

        delta = htonl(oldlen + (skb->tail - skb->transport_header) +
                      skb->data_len);
        th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
                               (__force u32)delta));
        if (skb->ip_summed != CHECKSUM_PARTIAL)
                th->check = csum_fold(csum_partial(skb_transport_header(skb),
                                                   thlen, skb->csum));

out:
        return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
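
/* A note on the checksum fix-ups in tcp_tso_segment(): they are, in effect,
 * the RFC 1624 incremental update HC' = ~(~HC + ~m + m') applied to the
 * TCP length term of the pseudo-header sum.  oldlen holds the one's
 * complement of the original TCP length (~skb->len, captured before the
 * header was pulled), and thlen + len is the new length of each full-sized
 * segment, so adding htonl(oldlen + (thlen + len)) to the unfolded checksum
 * rewrites just the length term.  The final, usually shorter, segment
 * repeats the update with its real tail length instead.
 */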

#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
        int cpu;
        for_each_possible_cpu(cpu) {
                struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
                if (p) {
                        if (p->md5_desc.tfm)
                                crypto_free_hash(p->md5_desc.tfm);
                        kfree(p);
                        p = NULL;
                }
        }
        free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
        struct tcp_md5sig_pool **pool = NULL;

        spin_lock_bh(&tcp_md5sig_pool_lock);
        if (--tcp_md5sig_users == 0) {
                pool = tcp_md5sig_pool;
                tcp_md5sig_pool = NULL;
        }
        spin_unlock_bh(&tcp_md5sig_pool_lock);
        if (pool)
                __tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);

static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
        int cpu;
        struct tcp_md5sig_pool **pool;

        pool = alloc_percpu(struct tcp_md5sig_pool *);
        if (!pool)
                return NULL;

        for_each_possible_cpu(cpu) {
                struct tcp_md5sig_pool *p;
                struct crypto_hash *hash;

                p = kzalloc(sizeof(*p), GFP_KERNEL);
                if (!p)
                        goto out_free;
                *per_cpu_ptr(pool, cpu) = p;

                hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
                if (!hash || IS_ERR(hash))
                        goto out_free;

                p->md5_desc.tfm = hash;
        }
        return pool;
out_free:
        __tcp_free_md5sig_pool(pool);
        return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
        struct tcp_md5sig_pool **pool;
        int alloc = 0;

retry:
        spin_lock_bh(&tcp_md5sig_pool_lock);
        pool = tcp_md5sig_pool;
        if (tcp_md5sig_users++ == 0) {
                alloc = 1;
                spin_unlock_bh(&tcp_md5sig_pool_lock);
        } else if (!pool) {
                tcp_md5sig_users--;
                spin_unlock_bh(&tcp_md5sig_pool_lock);
                cpu_relax();
                goto retry;
        } else
                spin_unlock_bh(&tcp_md5sig_pool_lock);

        if (alloc) {
                /* we cannot hold spinlock here because this may sleep. */
                struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
                spin_lock_bh(&tcp_md5sig_pool_lock);
                if (!p) {
                        tcp_md5sig_users--;
                        spin_unlock_bh(&tcp_md5sig_pool_lock);
                        return NULL;
                }
                pool = tcp_md5sig_pool;
                if (pool) {
                        /* oops, it has already been assigned. */
                        spin_unlock_bh(&tcp_md5sig_pool_lock);
                        __tcp_free_md5sig_pool(p);
                } else {
                        tcp_md5sig_pool = pool = p;
                        spin_unlock_bh(&tcp_md5sig_pool_lock);
                }
        }
        return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);

struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
        struct tcp_md5sig_pool **p;
        spin_lock_bh(&tcp_md5sig_pool_lock);
        p = tcp_md5sig_pool;
        if (p)
                tcp_md5sig_users++;
        spin_unlock_bh(&tcp_md5sig_pool_lock);
        return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
        tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif
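
/* Illustrative userspace sketch (not part of this file; the tcp_md5sig
 * field names are an assumption, check the definition in <linux/tcp.h>) of
 * installing a key via the TCP_MD5SIG option parsed in do_tcp_setsockopt();
 * "peer" is an assumed, already-filled struct sockaddr_in and "secret" a
 * NUL-terminated key string:
 *
 *        struct tcp_md5sig md5;
 *
 *        memset(&md5, 0, sizeof(md5));
 *        memcpy(&md5.tcpm_addr, &peer, sizeof(peer));
 *        md5.tcpm_keylen = strlen(secret);
 *        memcpy(md5.tcpm_key, secret, md5.tcpm_keylen);
 *        setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 */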

void tcp_done(struct sock *sk)
{
        if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
                TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);

        tcp_set_state(sk, TCP_CLOSE);
        tcp_clear_xmit_timers(sk);

        sk->sk_shutdown = SHUTDOWN_MASK;

        if (!sock_flag(sk, SOCK_DEAD))
                sk->sk_state_change(sk);
        else
                inet_csk_destroy_sock(sk);
}
EXPORT_SYMBOL_GPL(tcp_done);

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
        if (!str)
                return 0;
        thash_entries = simple_strtoul(str, &str, 0);
        return 1;
}
__setup("thash_entries=", set_thash_entries);
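
/* The __setup() hook above consumes a kernel command-line token; e.g.
 * booting with "thash_entries=131072" overrides the memory-based sizing of
 * the established hash table that tcp_init() computes below.
 */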

void __init tcp_init(void)
{
        struct sk_buff *skb = NULL;
        unsigned long limit;
        int order, i, max_share;

        if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
                __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
                                           sizeof(skb->cb));

        tcp_hashinfo.bind_bucket_cachep =
                kmem_cache_create("tcp_bind_bucket",
                                  sizeof(struct inet_bind_bucket), 0,
                                  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

        /* Size and allocate the main established and bind bucket
         * hash tables.
         *
         * The methodology is similar to that of the buffer cache.
         */
        tcp_hashinfo.ehash =
                alloc_large_system_hash("TCP established",
                                        sizeof(struct inet_ehash_bucket),
                                        thash_entries,
                                        (num_physpages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
                                        &tcp_hashinfo.ehash_size,
                                        NULL,
                                        0);
        tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
        for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
                rwlock_init(&tcp_hashinfo.ehash[i].lock);
                INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
                INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
        }

        tcp_hashinfo.bhash =
                alloc_large_system_hash("TCP bind",
                                        sizeof(struct inet_bind_hashbucket),
                                        tcp_hashinfo.ehash_size,
                                        (num_physpages >= 128 * 1024) ?
                                        13 : 15,
                                        0,
                                        &tcp_hashinfo.bhash_size,
                                        NULL,
                                        64 * 1024);
        tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
        for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
                spin_lock_init(&tcp_hashinfo.bhash[i].lock);
                INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
        }

        /* Try to be a bit smarter and adjust defaults depending
         * on available memory.
         */
        for (order = 0; ((1 << order) << PAGE_SHIFT) <
             (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
             order++)
                ;
        if (order >= 4) {
                tcp_death_row.sysctl_max_tw_buckets = 180000;
                sysctl_tcp_max_orphans = 4096 << (order - 4);
                sysctl_max_syn_backlog = 1024;
        } else if (order < 3) {
                tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
                sysctl_tcp_max_orphans >>= (3 - order);
                sysctl_max_syn_backlog = 128;
        }

        /* Set the pressure threshold to be a fraction of global memory that
         * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
         * memory, with a floor of 128 pages.
         */
        limit = min(nr_all_pages, 1UL << (28 - PAGE_SHIFT)) >> (20 - PAGE_SHIFT);
        limit = (limit * (nr_all_pages >> (20 - PAGE_SHIFT))) >> (PAGE_SHIFT - 11);
        limit = max(limit, 128UL);
        sysctl_tcp_mem[0] = limit / 4 * 3;
        sysctl_tcp_mem[1] = limit;
        sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

        /* Set per-socket limits to no more than 1/128 the pressure threshold */
        limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
        max_share = min(4UL * 1024 * 1024, limit);

        sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
        sysctl_tcp_wmem[1] = 16 * 1024;
        sysctl_tcp_wmem[2] = max(64 * 1024, max_share);

        sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
        sysctl_tcp_rmem[1] = 87380;
        sysctl_tcp_rmem[2] = max(87380, max_share);

        printk(KERN_INFO "TCP: Hash tables configured "
               "(established %d bind %d)\n",
               tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);

        tcp_register_congestion_control(&tcp_reno);
}
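
/* Worked example of the tcp_mem sizing above (an assumed configuration,
 * not measured output): with 4 KB pages and 256 MB of RAM, nr_all_pages is
 * 65536, so limit = min(65536, 65536) >> 8 = 256, then
 * (256 * (65536 >> 8)) >> 1 = 32768 pages (128 MB), giving
 * sysctl_tcp_mem[] = { 24576, 32768, 49152 } pages: pressure starts at
 * half of memory, as the comment in tcp_init() promises.
 */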

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);