/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *		Michael O'Reilly:	ack < copied bug fix.
 *		Johannes Stille	:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick	:	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick	:	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon	:	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan	:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or(at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
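
/*
 * For illustration (not part of the original comment): an active close
 * typically walks ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT ->
 * CLOSE, while the passive side walks ESTABLISHED -> CLOSE_WAIT ->
 * LAST_ACK -> CLOSE.
 */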

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by the poll logic, and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making poll() on write()
	 * impossible in state CLOSE_WAIT. One solution is evident --- to set
	 * POLLHUP if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look at how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW the examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT. --ANK
	 *
	 * NOTE. A check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on a fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If the read of tp below
		 * escapes above sk->sk_state, we can be illegally
		 * awakened in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -= tcp_hdr((struct sk_buff *)
						sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
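
/*
 * For illustration only (not part of the original source): user space
 * typically reaches the handlers above as
 *
 *	int unread, unsent;
 *	ioctl(fd, SIOCINQ, &unread);	// bytes queued, not yet read
 *	ioctl(fd, SIOCOUTQ, &unsent);	// bytes written, not yet ACKed
 */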
465
Linus Torvalds1da177e2005-04-16 15:20:36 -0700466static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
467{
468 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
469 tp->pushed_seq = tp->write_seq;
470}
471
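/* Force a push once more than half of the peer's largest-ever advertised
 * window has been written since the last pushed byte.
 */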
static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->flags = TCPCB_FLAG_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
		TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 * tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:	socket to splice from
 * @ppos:	position (not valid)
 * @pipe:	pipe to splice to
 * @len:	number of bytes to splice
 * @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
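
/*
 * For illustration only (not from the original source): a user-space
 * caller reaches this path through splice(2), e.g. moving data from a
 * connected TCP socket "sock_fd" into a pipe without copying through
 * user memory:
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sock_fd, NULL, pfd[1], NULL, 4096,
 *			   SPLICE_F_MOVE | SPLICE_F_NONBLOCK);
 *	// n > 0: bytes spliced; n == -1 && errno == EAGAIN: try again
 */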

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < mss_now || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

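/* Decide how much linear skb head room to allocate for a fresh segment:
 * zero with GSO (the payload lives in page fragments), otherwise roughly
 * one cached MSS, clamped so a non-GSO SG skb's head still fits one page.
 */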
static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate a new segment. If the interface
				 * is SG, allocate an skb that fits into a
				 * single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk, select_size(sk),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < mss_now || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 * Handle reading urgent data. BSD has very simple semantics for
 * this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			size_t used, len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			if (offset != skb->len)
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}

/*
 * This routine copies from a sock struct into the user buffer.
 *
 * Technical note: in 2.3 we work on _locked_ socket, so that
 * tricks with *seq access order and skb->users are not required.
 * Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int copied = 0;
	u32 peek_seq;
	u32 *seq;
	unsigned long used;
	int err;
	int target;		/* Read at least this many bytes */
	long timeo;
	struct task_struct *user_recv = NULL;
	int copied_early = 0;
	struct sk_buff *skb;

	lock_sock(sk);

	TCP_CHECK_TIMER(sk);

	err = -ENOTCONN;
	if (sk->sk_state == TCP_LISTEN)
		goto out;

	timeo = sock_rcvtimeo(sk, nonblock);

	/* Urgent data needs to be handled specially. */
	if (flags & MSG_OOB)
		goto recv_urg;

	seq = &tp->copied_seq;
	if (flags & MSG_PEEK) {
		peek_seq = tp->copied_seq;
		seq = &peek_seq;
	}

	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
	tp->ucopy.dma_chan = NULL;
	preempt_disable();
	skb = skb_peek_tail(&sk->sk_receive_queue);
	{
		int available = 0;

		if (skb)
			available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
		if ((available < target) &&
		    (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
		    !sysctl_tcp_low_latency &&
		    __get_cpu_var(softnet_data).net_dma) {
			preempt_enable_no_resched();
			tp->ucopy.pinned_list =
					dma_pin_iovec_pages(msg->msg_iov, len);
		} else {
			preempt_enable_no_resched();
		}
	}
#endif

	do {
		u32 offset;

		/* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
		if (tp->urg_data && tp->urg_seq == *seq) {
			if (copied)
				break;
			if (signal_pending(current)) {
				copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
				break;
			}
		}

		/* Next get a buffer. */

		skb = skb_peek(&sk->sk_receive_queue);
		do {
			if (!skb)
				break;

			/* Now that we have two receive queues this
			 * shouldn't happen.
			 */
			if (before(*seq, TCP_SKB_CB(skb)->seq)) {
				printk(KERN_INFO "recvmsg bug: copied %X "
				       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
				break;
			}
			offset = *seq - TCP_SKB_CB(skb)->seq;
			if (tcp_hdr(skb)->syn)
				offset--;
			if (offset < skb->len)
				goto found_ok_skb;
			if (tcp_hdr(skb)->fin)
				goto found_fin_ok;
			BUG_TRAP(flags & MSG_PEEK);
			skb = skb->next;
		} while (skb != (struct sk_buff *)&sk->sk_receive_queue);

		/* Well, if we have backlog, try to process it now. */

		if (copied >= target && !sk->sk_backlog.tail)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current) ||
			    (flags & MSG_PEEK))
				break;
		} else {
			if (sock_flag(sk, SOCK_DONE))
				break;

			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				if (!sock_flag(sk, SOCK_DONE)) {
					/* This occurs when the user tries to
					 * read from a never-connected socket.
					 */
					copied = -ENOTCONN;
					break;
				}
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		tcp_cleanup_rbuf(sk, copied);

		if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
			/* Install new reader */
			if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
				user_recv = current;
				tp->ucopy.task = user_recv;
				tp->ucopy.iov = msg->msg_iov;
			}

			tp->ucopy.len = len;

			BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
				 (flags & (MSG_PEEK | MSG_TRUNC)));

			/* Ugly... If prequeue is not empty, we have to
			 * process it before releasing socket, otherwise
			 * order will be broken at second iteration.
			 * More elegant solution is required!!!
			 *
			 * Look: we have the following (pseudo)queues:
			 *
			 * 1. packets in flight
			 * 2. backlog
			 * 3. prequeue
			 * 4. receive_queue
			 *
			 * Each queue can be processed only if the next ones
			 * are empty. At this point we have empty receive_queue.
			 * But prequeue _can_ be not empty after 2nd iteration,
			 * when we jumped to start of loop because backlog
			 * processing added something to receive_queue.
			 * We cannot release_sock(), because backlog contains
			 * packets arrived _after_ prequeued ones.
			 *
			 * Shortly, algorithm is clear --- to process all
			 * the queues in order. We could make it more directly,
			 * requeueing packets from backlog to prequeue, if it
			 * is not empty. It is more elegant, but eats cycles,
			 * unfortunately.
			 */
			if (!skb_queue_empty(&tp->ucopy.prequeue))
				goto do_prequeue;

			/* __ Set realtime policy in scheduler __ */
		}

		if (copied >= target) {
			/* Do not sleep, just process backlog. */
			release_sock(sk);
			lock_sock(sk);
		} else
			sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
		tp->ucopy.wakeup = 0;
#endif

		if (user_recv) {
			int chunk;

			/* __ Restore normal policy in scheduler __ */

			if ((chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
				len -= chunk;
				copied += chunk;
			}

			if (tp->rcv_nxt == tp->copied_seq &&
			    !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
				tcp_prequeue_process(sk);

				if ((chunk = len - tp->ucopy.len) != 0) {
					NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
					len -= chunk;
					copied += chunk;
				}
			}
		}
		if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
			if (net_ratelimit())
				printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
				       current->comm, task_pid_nr(current));
			peek_seq = tp->copied_seq;
		}
		continue;

	found_ok_skb:
		/* Ok so how much can we use? */
		used = skb->len - offset;
		if (len < used)
			used = len;

		/* Do we have urgent data here? */
		if (tp->urg_data) {
			u32 urg_offset = tp->urg_seq - *seq;
			if (urg_offset < used) {
				if (!urg_offset) {
					if (!sock_flag(sk, SOCK_URGINLINE)) {
						++*seq;
						offset++;
						used--;
						if (!used)
							goto skip_copy;
					}
				} else
					used = urg_offset;
			}
		}

		if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
			if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
				tp->ucopy.dma_chan = get_softnet_dma();

			if (tp->ucopy.dma_chan) {
				tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
					tp->ucopy.dma_chan, skb, offset,
					msg->msg_iov, used,
					tp->ucopy.pinned_list);

				if (tp->ucopy.dma_cookie < 0) {

					printk(KERN_ALERT "dma_cookie < 0\n");

					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
				if ((offset + used) == skb->len)
					copied_early = 1;

			} else
#endif
			{
				err = skb_copy_datagram_iovec(skb, offset,
						msg->msg_iov, used);
				if (err) {
					/* Exception. Bailout! */
					if (!copied)
						copied = -EFAULT;
					break;
				}
			}
		}

		*seq += used;
		copied += used;
		len -= used;

		tcp_rcv_space_adjust(sk);

skip_copy:
		if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
			tp->urg_data = 0;
			tcp_fast_path_check(sk);
		}
		if (used + offset < skb->len)
			continue;

		if (tcp_hdr(skb)->fin)
			goto found_fin_ok;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		continue;

	found_fin_ok:
		/* Process the FIN. */
		++*seq;
		if (!(flags & MSG_PEEK)) {
			sk_eat_skb(sk, skb, copied_early);
			copied_early = 0;
		}
		break;
	} while (len > 0);

	if (user_recv) {
		if (!skb_queue_empty(&tp->ucopy.prequeue)) {
			int chunk;

			tp->ucopy.len = copied > 0 ? len : 0;

			tcp_prequeue_process(sk);

			if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
				NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
				len -= chunk;
				copied += chunk;
			}
		}

		tp->ucopy.task = NULL;
		tp->ucopy.len = 0;
	}

#ifdef CONFIG_NET_DMA
	if (tp->ucopy.dma_chan) {
		dma_cookie_t done, used;

		dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

		while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
						 tp->ucopy.dma_cookie, &done,
						 &used) == DMA_IN_PROGRESS) {
			/* do partial cleanup of sk_async_wait_queue */
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}

		/* Safe to free early-copied skbs now */
		__skb_queue_purge(&sk->sk_async_wait_queue);
		dma_chan_put(tp->ucopy.dma_chan);
		tp->ucopy.dma_chan = NULL;
	}
	if (tp->ucopy.pinned_list) {
		dma_unpin_iovec_pages(tp->ucopy.pinned_list);
		tp->ucopy.pinned_list = NULL;
	}
#endif

Linus Torvalds1da177e2005-04-16 15:20:36 -07001635	/* According to UNIX98, msg_name/msg_namelen are ignored
 1636	 * on a connected socket. I was just happy when I found this 8) --ANK
1637 */
1638
1639 /* Clean up data we have read: This will do ACK frames. */
Chris Leech0e4b4992006-05-23 18:00:16 -07001640 tcp_cleanup_rbuf(sk, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641
1642 TCP_CHECK_TIMER(sk);
1643 release_sock(sk);
1644 return copied;
1645
1646out:
1647 TCP_CHECK_TIMER(sk);
1648 release_sock(sk);
1649 return err;
1650
1651recv_urg:
1652 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1653 goto out;
1654}
1655
1656/*
1657 * State processing on a close. This implements the state shift for
1658 * sending our FIN frame. Note that we only send a FIN for some
1659 * states. A shutdown() may have already sent the FIN, or we may be
1660 * closed.
1661 */
1662
Arjan van de Ven9b5b5cf2005-11-29 16:21:38 -08001663static const unsigned char new_state[16] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 /* current state: new state: action: */
1665 /* (Invalid) */ TCP_CLOSE,
1666 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1667 /* TCP_SYN_SENT */ TCP_CLOSE,
1668 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1669 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1670 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1671 /* TCP_TIME_WAIT */ TCP_CLOSE,
1672 /* TCP_CLOSE */ TCP_CLOSE,
1673 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1674 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1675 /* TCP_LISTEN */ TCP_CLOSE,
1676 /* TCP_CLOSING */ TCP_CLOSING,
1677};
1678
1679static int tcp_close_state(struct sock *sk)
1680{
1681 int next = (int)new_state[sk->sk_state];
1682 int ns = next & TCP_STATE_MASK;
1683
1684 tcp_set_state(sk, ns);
1685
1686 return next & TCP_ACTION_FIN;
1687}
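/* Illustrative walk through the table above: a socket in TCP_ESTABLISHED
 * maps to TCP_FIN_WAIT1 | TCP_ACTION_FIN, so tcp_close_state() moves it
 * to FIN-WAIT-1 and returns non-zero, telling the caller to send a FIN;
 * TCP_CLOSE_WAIT maps to TCP_LAST_ACK | TCP_ACTION_FIN the same way,
 * while TCP_SYN_SENT simply drops to TCP_CLOSE with no FIN on the wire.
 */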
1688
1689/*
1690 * Shutdown the sending side of a connection. Much like close except
 1691 * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
1692 */
1693
1694void tcp_shutdown(struct sock *sk, int how)
1695{
1696 /* We need to grab some memory, and put together a FIN,
1697 * and then put it into the queue to be sent.
 1698	 * Tim MacKenzie (tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1699 */
1700 if (!(how & SEND_SHUTDOWN))
1701 return;
1702
1703 /* If we've already sent a FIN, or it's a closed state, skip this. */
1704 if ((1 << sk->sk_state) &
1705 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1706 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1707 /* Clear out any half completed packets. FIN if needed. */
1708 if (tcp_close_state(sk))
1709 tcp_send_fin(sk);
1710 }
1711}
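/* A minimal userspace sketch of the half-close this implements (purely
 * illustrative; consume() is a hypothetical reader):
 *
 *	shutdown(fd, SHUT_WR);			// our FIN goes out
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		consume(buf, n);		// still readable until peer FIN
 *	close(fd);
 */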
1712
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713void tcp_close(struct sock *sk, long timeout)
1714{
1715 struct sk_buff *skb;
1716 int data_was_unread = 0;
Herbert Xu75c2d9072006-05-03 23:31:35 -07001717 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718
1719 lock_sock(sk);
1720 sk->sk_shutdown = SHUTDOWN_MASK;
1721
1722 if (sk->sk_state == TCP_LISTEN) {
1723 tcp_set_state(sk, TCP_CLOSE);
1724
1725 /* Special case. */
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001726 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001727
1728 goto adjudge_to_death;
1729 }
1730
 1731	/* We need to flush the receive buffers. We do this only on the
1732 * descriptor close, not protocol-sourced closes, because the
1733 * reader process may not have drained the data yet!
1734 */
1735 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1736 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001737 tcp_hdr(skb)->fin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738 data_was_unread += len;
1739 __kfree_skb(skb);
1740 }
1741
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001742 sk_mem_reclaim(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743
Gerrit Renker65bb7232007-04-28 21:21:46 -07001744 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1745 * data was lost. To witness the awful effects of the old behavior of
1746 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1747 * GET in an FTP client, suspend the process, wait for the client to
1748 * advertise a zero window, then kill -9 the FTP client, wheee...
1749 * Note: timeout is always zero in such a case.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750 */
1751 if (data_was_unread) {
1752 /* Unread data was tossed, zap the connection. */
1753 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1754 tcp_set_state(sk, TCP_CLOSE);
1755 tcp_send_active_reset(sk, GFP_KERNEL);
1756 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1757 /* Check zero linger _after_ checking for unread data. */
1758 sk->sk_prot->disconnect(sk, 0);
1759 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1760 } else if (tcp_close_state(sk)) {
1761 /* We FIN if the application ate all the data before
1762 * zapping the connection.
1763 */
1764
 1765		/* RED-PEN. Formally speaking, we have broken the TCP state
 1766		 * machine. State transitions:
1767 *
1768 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1769 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1770 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1771 *
1772 * are legal only when FIN has been sent (i.e. in window),
1773 * rather than queued out of window. Purists blame.
1774 *
 1775		 * E.g. the "RFC state" is ESTABLISHED
 1776		 * if the Linux state is FIN-WAIT-1 but the FIN has still not been sent.
 1777		 *
 1778		 * The visible deviations are that sometimes
 1779		 * we enter the time-wait state when it is not really required
 1780		 * (harmless), and do not send active resets when they are
 1781		 * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
 1782		 * they look like CLOSING or LAST_ACK to Linux).
 1783		 * Probably, I missed some more holelets.
1784 * --ANK
1785 */
1786 tcp_send_fin(sk);
1787 }
1788
1789 sk_stream_wait_close(sk, timeout);
1790
1791adjudge_to_death:
Herbert Xu75c2d9072006-05-03 23:31:35 -07001792 state = sk->sk_state;
1793 sock_hold(sk);
1794 sock_orphan(sk);
1795 atomic_inc(sk->sk_prot->orphan_count);
1796
Linus Torvalds1da177e2005-04-16 15:20:36 -07001797 /* It is the last release_sock in its life. It will remove backlog. */
1798 release_sock(sk);
1799
1800
 1801	/* Now socket is owned by kernel and we acquire BH lock
 1802	 * to finish close. No need to check for user refs.
 1803	 */
1804 local_bh_disable();
1805 bh_lock_sock(sk);
1806 BUG_TRAP(!sock_owned_by_user(sk));
1807
Herbert Xu75c2d9072006-05-03 23:31:35 -07001808 /* Have we already been destroyed by a softirq or backlog? */
1809 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1810 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001811
 1812	/* This is a (useful) BSD violation of the RFC. There is a
 1813	 * problem with TCP as specified, in that the other end could
 1814	 * keep a socket open forever with no application left at this end.
 1815	 * We use a 3 minute timeout (about the same as BSD), then kill
 1816	 * our end. If they send after that then tough - BUT it is long
 1817	 * enough that we won't repeat the old mistake of 4*rto = almost
 1818	 * no time - whoops, reset.
 1819	 *
 1820	 * Nope, it was not a mistake. It is really desired behaviour,
 1821	 * e.g. on HTTP servers, where such sockets are useless but
 1822	 * consume significant resources. Let's do it with a special
 1823	 * linger2 option. --ANK
1824 */
1825
1826 if (sk->sk_state == TCP_FIN_WAIT2) {
1827 struct tcp_sock *tp = tcp_sk(sk);
1828 if (tp->linger2 < 0) {
1829 tcp_set_state(sk, TCP_CLOSE);
1830 tcp_send_active_reset(sk, GFP_ATOMIC);
1831 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1832 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001833 const int tmo = tcp_fin_time(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001834
1835 if (tmo > TCP_TIMEWAIT_LEN) {
David S. Miller52499af2006-07-31 22:32:09 -07001836 inet_csk_reset_keepalive_timer(sk,
1837 tmo - TCP_TIMEWAIT_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001839 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1840 goto out;
1841 }
1842 }
1843 }
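	/* E.g. (illustrative, assuming TCP_TIMEWAIT_LEN of 60 s): with
	 * tcp_fin_time() of 60 s we drop straight into a FIN-WAIT-2
	 * timewait bucket above; with tcp_fin_time() of 3 min the
	 * keepalive timer first runs down the extra 2 min.
	 */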
1844 if (sk->sk_state != TCP_CLOSE) {
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001845 sk_mem_reclaim(sk);
Pavel Emelianove4fd5da2007-05-29 13:19:18 -07001846 if (tcp_too_many_orphans(sk,
1847 atomic_read(sk->sk_prot->orphan_count))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001848 if (net_ratelimit())
 1849			printk(KERN_INFO "TCP: too many orphaned "
 1850			       "sockets\n");
1851 tcp_set_state(sk, TCP_CLOSE);
1852 tcp_send_active_reset(sk, GFP_ATOMIC);
1853 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1854 }
1855 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856
1857 if (sk->sk_state == TCP_CLOSE)
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001858 inet_csk_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859 /* Otherwise, socket is reprieved until protocol close. */
1860
1861out:
1862 bh_unlock_sock(sk);
1863 local_bh_enable();
1864 sock_put(sk);
1865}
1866
1867/* These states need RST on ABORT according to RFC793 */
1868
1869static inline int tcp_need_reset(int state)
1870{
1871 return (1 << state) &
1872 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1873 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1874}
1875
1876int tcp_disconnect(struct sock *sk, int flags)
1877{
1878 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001879 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 struct tcp_sock *tp = tcp_sk(sk);
1881 int err = 0;
1882 int old_state = sk->sk_state;
1883
1884 if (old_state != TCP_CLOSE)
1885 tcp_set_state(sk, TCP_CLOSE);
1886
1887 /* ABORT function of RFC793 */
1888 if (old_state == TCP_LISTEN) {
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001889 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 } else if (tcp_need_reset(old_state) ||
1891 (tp->snd_nxt != tp->write_seq &&
1892 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001893		/* The last check adjusts for the discrepancy between Linux and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894		 * the RFC states.
 1895		 */
1896 tcp_send_active_reset(sk, gfp_any());
1897 sk->sk_err = ECONNRESET;
1898 } else if (old_state == TCP_SYN_SENT)
1899 sk->sk_err = ECONNRESET;
1900
1901 tcp_clear_xmit_timers(sk);
1902 __skb_queue_purge(&sk->sk_receive_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001903 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001904 __skb_queue_purge(&tp->out_of_order_queue);
Chris Leech1a2449a2006-05-23 18:05:53 -07001905#ifdef CONFIG_NET_DMA
1906 __skb_queue_purge(&sk->sk_async_wait_queue);
1907#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001908
1909 inet->dport = 0;
1910
1911 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1912 inet_reset_saddr(sk);
1913
1914 sk->sk_shutdown = 0;
1915 sock_reset_flag(sk, SOCK_DONE);
1916 tp->srtt = 0;
1917 if ((tp->write_seq += tp->max_window + 2) == 0)
1918 tp->write_seq = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001919 icsk->icsk_backoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001920 tp->snd_cwnd = 2;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001921 icsk->icsk_probes_out = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001922 tp->packets_out = 0;
1923 tp->snd_ssthresh = 0x7fffffff;
1924 tp->snd_cwnd_cnt = 0;
Stephen Hemminger9772efb2005-11-10 17:09:53 -08001925 tp->bytes_acked = 0;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001926 tcp_set_ca_state(sk, TCP_CA_Open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927 tcp_clear_retrans(tp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001928 inet_csk_delack_init(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001929 tcp_init_send_head(sk);
Srinivas Ajib40b4f72007-05-03 17:32:28 -07001930 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 __sk_dst_reset(sk);
1932
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001933 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
1935 sk->sk_error_report(sk);
1936 return err;
1937}
1938
1939/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 * Socket option code for TCP.
1941 */
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001942static int do_tcp_setsockopt(struct sock *sk, int level,
1943 int optname, char __user *optval, int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001944{
1945 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001946 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001947 int val;
1948 int err = 0;
1949
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001950 /* This is a string value all the others are int's */
1951 if (optname == TCP_CONGESTION) {
1952 char name[TCP_CA_NAME_MAX];
1953
1954 if (optlen < 1)
1955 return -EINVAL;
1956
1957 val = strncpy_from_user(name, optval,
1958 min(TCP_CA_NAME_MAX-1, optlen));
1959 if (val < 0)
1960 return -EFAULT;
1961 name[val] = 0;
1962
1963 lock_sock(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001964 err = tcp_set_congestion_control(sk, name);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001965 release_sock(sk);
1966 return err;
1967 }
1968
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969 if (optlen < sizeof(int))
1970 return -EINVAL;
1971
1972 if (get_user(val, (int __user *)optval))
1973 return -EFAULT;
1974
1975 lock_sock(sk);
1976
1977 switch (optname) {
1978 case TCP_MAXSEG:
 1979		/* Values greater than the interface MTU won't take effect. However,
 1980		 * at the point when this call is made we typically don't yet
 1981		 * know which interface is going to be used. */
1982 if (val < 8 || val > MAX_TCP_WINDOW) {
1983 err = -EINVAL;
1984 break;
1985 }
1986 tp->rx_opt.user_mss = val;
1987 break;
1988
1989 case TCP_NODELAY:
1990 if (val) {
1991 /* TCP_NODELAY is weaker than TCP_CORK, so that
1992 * this option on corked socket is remembered, but
1993 * it is not activated until cork is cleared.
1994 *
1995 * However, when TCP_NODELAY is set we make
1996 * an explicit push, which overrides even TCP_CORK
1997 * for currently queued segments.
1998 */
1999 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002000 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002001 } else {
2002 tp->nonagle &= ~TCP_NAGLE_OFF;
2003 }
2004 break;
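		/* Hedged userspace sketch (illustrative): latency-sensitive
		 * callers typically just flip the bit once:
		 *
		 *	int one = 1;
		 *	setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &one, sizeof(one));
		 */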
2005
2006 case TCP_CORK:
 2007		/* When set, this indicates that non-full frames should always be queued.
2008 * Later the user clears this option and we transmit
2009 * any pending partial frames in the queue. This is
2010 * meant to be used alongside sendfile() to get properly
2011 * filled frames when the user (for example) must write
2012 * out headers with a write() call first and then use
2013 * sendfile to send out the data parts.
2014 *
2015 * TCP_CORK can be set together with TCP_NODELAY and it is
2016 * stronger than TCP_NODELAY.
2017 */
2018 if (val) {
2019 tp->nonagle |= TCP_NAGLE_CORK;
2020 } else {
2021 tp->nonagle &= ~TCP_NAGLE_CORK;
2022 if (tp->nonagle&TCP_NAGLE_OFF)
2023 tp->nonagle |= TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002024 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002025 }
2026 break;
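		/* Hedged userspace sketch of the header-plus-sendfile pattern
		 * described above (illustrative only):
		 *
		 *	int on = 1, off = 0;
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
		 *	write(fd, hdr, hdrlen);		// queued, not yet pushed
		 *	sendfile(fd, filefd, NULL, filelen);
		 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
		 *	// uncorking transmits any remaining partial frame
		 */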
2027
2028 case TCP_KEEPIDLE:
2029 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2030 err = -EINVAL;
2031 else {
2032 tp->keepalive_time = val * HZ;
2033 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2034 !((1 << sk->sk_state) &
2035 (TCPF_CLOSE | TCPF_LISTEN))) {
2036 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2037 if (tp->keepalive_time > elapsed)
2038 elapsed = tp->keepalive_time - elapsed;
2039 else
2040 elapsed = 0;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002041 inet_csk_reset_keepalive_timer(sk, elapsed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 }
2043 }
2044 break;
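		/* Worked example (illustrative): if the peer was last heard
		 * from 40 s ago and the new keepalive_time is 60 s, the timer
		 * above is re-armed for the remaining 20 s; had 70 s already
		 * passed, elapsed clamps to 0 and the probe fires at once.
		 */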
2045 case TCP_KEEPINTVL:
2046 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2047 err = -EINVAL;
2048 else
2049 tp->keepalive_intvl = val * HZ;
2050 break;
2051 case TCP_KEEPCNT:
2052 if (val < 1 || val > MAX_TCP_KEEPCNT)
2053 err = -EINVAL;
2054 else
2055 tp->keepalive_probes = val;
2056 break;
2057 case TCP_SYNCNT:
2058 if (val < 1 || val > MAX_TCP_SYNCNT)
2059 err = -EINVAL;
2060 else
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002061 icsk->icsk_syn_retries = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 break;
2063
2064 case TCP_LINGER2:
2065 if (val < 0)
2066 tp->linger2 = -1;
2067 else if (val > sysctl_tcp_fin_timeout / HZ)
2068 tp->linger2 = 0;
2069 else
2070 tp->linger2 = val * HZ;
2071 break;
2072
2073 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002074 icsk->icsk_accept_queue.rskq_defer_accept = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075 if (val > 0) {
2076 /* Translate value in seconds to number of
2077 * retransmits */
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002078 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 val > ((TCP_TIMEOUT_INIT / HZ) <<
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002080 icsk->icsk_accept_queue.rskq_defer_accept))
2081 icsk->icsk_accept_queue.rskq_defer_accept++;
2082 icsk->icsk_accept_queue.rskq_defer_accept++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 }
2084 break;
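		/* Worked example (assuming TCP_TIMEOUT_INIT/HZ == 3): val = 30
		 * seconds walks the thresholds 3, 6, 12, 24 and stops at 48,
		 * so rskq_defer_accept ends up as 4 + 1 = 5 retransmit periods.
		 */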
2085
2086 case TCP_WINDOW_CLAMP:
2087 if (!val) {
2088 if (sk->sk_state != TCP_CLOSE) {
2089 err = -EINVAL;
2090 break;
2091 }
2092 tp->window_clamp = 0;
2093 } else
2094 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2095 SOCK_MIN_RCVBUF / 2 : val;
2096 break;
2097
2098 case TCP_QUICKACK:
2099 if (!val) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002100 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002102 icsk->icsk_ack.pingpong = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 if ((1 << sk->sk_state) &
2104 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002105 inet_csk_ack_scheduled(sk)) {
2106 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
Chris Leech0e4b4992006-05-23 18:00:16 -07002107 tcp_cleanup_rbuf(sk, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108 if (!(val & 1))
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002109 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 }
2111 }
2112 break;
2113
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002114#ifdef CONFIG_TCP_MD5SIG
2115 case TCP_MD5SIG:
2116 /* Read the IP->Key mappings from userspace */
2117 err = tp->af_specific->md5_parse(sk, optval, optlen);
2118 break;
2119#endif
2120
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 default:
2122 err = -ENOPROTOOPT;
2123 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002124 }
2125
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 release_sock(sk);
2127 return err;
2128}
2129
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002130int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2131 int optlen)
2132{
2133 struct inet_connection_sock *icsk = inet_csk(sk);
2134
2135 if (level != SOL_TCP)
2136 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2137 optval, optlen);
2138 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2139}
2140
2141#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002142int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2143 char __user *optval, int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002144{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002145 if (level != SOL_TCP)
2146 return inet_csk_compat_setsockopt(sk, level, optname,
2147 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002148 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2149}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002150
2151EXPORT_SYMBOL(compat_tcp_setsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002152#endif
2153
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154/* Return information about state of tcp endpoint in API format. */
2155void tcp_get_info(struct sock *sk, struct tcp_info *info)
2156{
2157 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002158 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 u32 now = tcp_time_stamp;
2160
2161 memset(info, 0, sizeof(*info));
2162
2163 info->tcpi_state = sk->sk_state;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002164 info->tcpi_ca_state = icsk->icsk_ca_state;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002165 info->tcpi_retransmits = icsk->icsk_retransmits;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002166 info->tcpi_probes = icsk->icsk_probes_out;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002167 info->tcpi_backoff = icsk->icsk_backoff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
2169 if (tp->rx_opt.tstamp_ok)
2170 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
Ilpo Järvinene60402d2007-08-09 15:14:46 +03002171 if (tcp_is_sack(tp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 info->tcpi_options |= TCPI_OPT_SACK;
2173 if (tp->rx_opt.wscale_ok) {
2174 info->tcpi_options |= TCPI_OPT_WSCALE;
2175 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2176 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002177 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178
2179 if (tp->ecn_flags&TCP_ECN_OK)
2180 info->tcpi_options |= TCPI_OPT_ECN;
2181
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002182 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2183 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002184 info->tcpi_snd_mss = tp->mss_cache;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002185 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
Rick Jones5ee3afb2007-09-18 13:26:31 -07002187 if (sk->sk_state == TCP_LISTEN) {
2188 info->tcpi_unacked = sk->sk_ack_backlog;
2189 info->tcpi_sacked = sk->sk_max_ack_backlog;
2190 } else {
2191 info->tcpi_unacked = tp->packets_out;
2192 info->tcpi_sacked = tp->sacked_out;
2193 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 info->tcpi_lost = tp->lost_out;
2195 info->tcpi_retrans = tp->retrans_out;
2196 info->tcpi_fackets = tp->fackets_out;
2197
2198 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002199 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2201
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002202 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2204 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2205 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2206 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2207 info->tcpi_snd_cwnd = tp->snd_cwnd;
2208 info->tcpi_advmss = tp->advmss;
2209 info->tcpi_reordering = tp->reordering;
2210
2211 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2212 info->tcpi_rcv_space = tp->rcvq_space.space;
2213
2214 info->tcpi_total_retrans = tp->total_retrans;
2215}
2216
2217EXPORT_SYMBOL_GPL(tcp_get_info);
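/* Hedged userspace sketch of consuming this API (illustrative):
 *
 *	struct tcp_info info;
 *	socklen_t len = sizeof(info);
 *
 *	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *		printf("rtt %uus cwnd %u retrans %u\n", info.tcpi_rtt,
 *		       info.tcpi_snd_cwnd, info.tcpi_total_retrans);
 */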
2218
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002219static int do_tcp_getsockopt(struct sock *sk, int level,
2220 int optname, char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221{
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002222 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002223 struct tcp_sock *tp = tcp_sk(sk);
2224 int val, len;
2225
Linus Torvalds1da177e2005-04-16 15:20:36 -07002226 if (get_user(len, optlen))
2227 return -EFAULT;
2228
2229 len = min_t(unsigned int, len, sizeof(int));
2230
2231 if (len < 0)
2232 return -EINVAL;
2233
2234 switch (optname) {
2235 case TCP_MAXSEG:
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002236 val = tp->mss_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2238 val = tp->rx_opt.user_mss;
2239 break;
2240 case TCP_NODELAY:
2241 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2242 break;
2243 case TCP_CORK:
2244 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2245 break;
2246 case TCP_KEEPIDLE:
2247 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2248 break;
2249 case TCP_KEEPINTVL:
2250 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2251 break;
2252 case TCP_KEEPCNT:
2253 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2254 break;
2255 case TCP_SYNCNT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002256 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257 break;
2258 case TCP_LINGER2:
2259 val = tp->linger2;
2260 if (val >= 0)
2261 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2262 break;
2263 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002264 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2265 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266 break;
2267 case TCP_WINDOW_CLAMP:
2268 val = tp->window_clamp;
2269 break;
2270 case TCP_INFO: {
2271 struct tcp_info info;
2272
2273 if (get_user(len, optlen))
2274 return -EFAULT;
2275
2276 tcp_get_info(sk, &info);
2277
2278 len = min_t(unsigned int, len, sizeof(info));
2279 if (put_user(len, optlen))
2280 return -EFAULT;
2281 if (copy_to_user(optval, &info, len))
2282 return -EFAULT;
2283 return 0;
2284 }
2285 case TCP_QUICKACK:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002286 val = !icsk->icsk_ack.pingpong;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002287 break;
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002288
2289 case TCP_CONGESTION:
2290 if (get_user(len, optlen))
2291 return -EFAULT;
2292 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2293 if (put_user(len, optlen))
2294 return -EFAULT;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002295 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002296 return -EFAULT;
2297 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 default:
2299 return -ENOPROTOOPT;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002300 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
2302 if (put_user(len, optlen))
2303 return -EFAULT;
2304 if (copy_to_user(optval, &val, len))
2305 return -EFAULT;
2306 return 0;
2307}
2308
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002309int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2310 int __user *optlen)
2311{
2312 struct inet_connection_sock *icsk = inet_csk(sk);
2313
2314 if (level != SOL_TCP)
2315 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2316 optval, optlen);
2317 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2318}
2319
2320#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002321int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2322 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002323{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002324 if (level != SOL_TCP)
2325 return inet_csk_compat_getsockopt(sk, level, optname,
2326 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002327 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2328}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002329
2330EXPORT_SYMBOL(compat_tcp_getsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002331#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002332
Herbert Xu576a30e2006-06-27 13:22:38 -07002333struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002334{
2335 struct sk_buff *segs = ERR_PTR(-EINVAL);
2336 struct tcphdr *th;
 2337	unsigned int thlen;
2338 unsigned int seq;
Al Virod3bc23e2006-11-14 21:24:49 -08002339 __be32 delta;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002340 unsigned int oldlen;
2341 unsigned int len;
2342
2343 if (!pskb_may_pull(skb, sizeof(*th)))
2344 goto out;
2345
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002346 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002347 thlen = th->doff * 4;
2348 if (thlen < sizeof(*th))
2349 goto out;
2350
2351 if (!pskb_may_pull(skb, thlen))
2352 goto out;
2353
Herbert Xu0718bcc2006-06-25 23:55:46 -07002354 oldlen = (u16)~skb->len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002355 __skb_pull(skb, thlen);
2356
Herbert Xu3820c3f2006-06-29 20:11:25 -07002357 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2358 /* Packet is from an untrusted source, reset gso_segs. */
Herbert Xubbcf4672006-07-03 19:38:35 -07002359 int type = skb_shinfo(skb)->gso_type;
2360 int mss;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002361
Herbert Xubbcf4672006-07-03 19:38:35 -07002362 if (unlikely(type &
2363 ~(SKB_GSO_TCPV4 |
2364 SKB_GSO_DODGY |
2365 SKB_GSO_TCP_ECN |
2366 SKB_GSO_TCPV6 |
2367 0) ||
2368 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2369 goto out;
2370
2371 mss = skb_shinfo(skb)->gso_size;
Ilpo Järvinen172589c2007-08-28 15:50:33 -07002372 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
Herbert Xu3820c3f2006-06-29 20:11:25 -07002373
2374 segs = NULL;
2375 goto out;
2376 }
2377
Herbert Xu576a30e2006-06-27 13:22:38 -07002378 segs = skb_segment(skb, features);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002379 if (IS_ERR(segs))
2380 goto out;
2381
2382 len = skb_shinfo(skb)->gso_size;
Herbert Xu0718bcc2006-06-25 23:55:46 -07002383 delta = htonl(oldlen + (thlen + len));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002384
2385 skb = segs;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002386 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002387 seq = ntohl(th->seq);
2388
2389 do {
2390 th->fin = th->psh = 0;
2391
Al Virod3bc23e2006-11-14 21:24:49 -08002392 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2393 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002394 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002395 th->check =
2396 csum_fold(csum_partial(skb_transport_header(skb),
2397 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002398
2399 seq += len;
2400 skb = skb->next;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002401 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002402
2403 th->seq = htonl(seq);
2404 th->cwr = 0;
2405 } while (skb->next);
2406
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002407 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002408 skb->data_len);
Al Virod3bc23e2006-11-14 21:24:49 -08002409 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2410 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002411 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002412 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2413 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002414
2415out:
2416 return segs;
2417}
Herbert Xuadcfc7d2006-06-30 13:36:15 -07002418EXPORT_SYMBOL(tcp_tso_segment);
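/* Checksum note on the segmentation above: the fix-ups are the usual
 * incremental-update trick (cf. RFC 1624). oldlen = (u16)~skb->len
 * removes the old length from the pseudo-header sum, and
 * delta = htonl(oldlen + thlen + len) feeds each segment's new length
 * back in, so th->check is patched rather than recomputed; only when
 * ip_summed != CHECKSUM_PARTIAL is csum_partial() run again, and then
 * just over the thlen header bytes folded with the payload sum already
 * accumulated in skb->csum.
 */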
Herbert Xuf4c50d92006-06-22 03:02:40 -07002419
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002420#ifdef CONFIG_TCP_MD5SIG
2421static unsigned long tcp_md5sig_users;
2422static struct tcp_md5sig_pool **tcp_md5sig_pool;
2423static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2424
2425static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2426{
2427 int cpu;
2428 for_each_possible_cpu(cpu) {
2429 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2430 if (p) {
2431 if (p->md5_desc.tfm)
2432 crypto_free_hash(p->md5_desc.tfm);
2433 kfree(p);
2434 p = NULL;
2435 }
2436 }
2437 free_percpu(pool);
2438}
2439
2440void tcp_free_md5sig_pool(void)
2441{
2442 struct tcp_md5sig_pool **pool = NULL;
2443
David S. Miller2c4f6212007-02-20 23:51:47 -08002444 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002445 if (--tcp_md5sig_users == 0) {
2446 pool = tcp_md5sig_pool;
2447 tcp_md5sig_pool = NULL;
2448 }
David S. Miller2c4f6212007-02-20 23:51:47 -08002449 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002450 if (pool)
2451 __tcp_free_md5sig_pool(pool);
2452}
2453
2454EXPORT_SYMBOL(tcp_free_md5sig_pool);
2455
Adrian Bunkf5b99bc2006-11-30 17:22:29 -08002456static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002457{
2458 int cpu;
2459 struct tcp_md5sig_pool **pool;
2460
2461 pool = alloc_percpu(struct tcp_md5sig_pool *);
2462 if (!pool)
2463 return NULL;
2464
2465 for_each_possible_cpu(cpu) {
2466 struct tcp_md5sig_pool *p;
2467 struct crypto_hash *hash;
2468
2469 p = kzalloc(sizeof(*p), GFP_KERNEL);
2470 if (!p)
2471 goto out_free;
2472 *per_cpu_ptr(pool, cpu) = p;
2473
2474 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2475 if (!hash || IS_ERR(hash))
2476 goto out_free;
2477
2478 p->md5_desc.tfm = hash;
2479 }
2480 return pool;
2481out_free:
2482 __tcp_free_md5sig_pool(pool);
2483 return NULL;
2484}
2485
2486struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2487{
2488 struct tcp_md5sig_pool **pool;
2489 int alloc = 0;
2490
2491retry:
David S. Miller2c4f6212007-02-20 23:51:47 -08002492 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002493 pool = tcp_md5sig_pool;
2494 if (tcp_md5sig_users++ == 0) {
2495 alloc = 1;
David S. Miller2c4f6212007-02-20 23:51:47 -08002496 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002497 } else if (!pool) {
2498 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002499 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002500 cpu_relax();
2501 goto retry;
2502 } else
David S. Miller2c4f6212007-02-20 23:51:47 -08002503 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002504
2505 if (alloc) {
2506 /* we cannot hold spinlock here because this may sleep. */
2507 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
David S. Miller2c4f6212007-02-20 23:51:47 -08002508 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002509 if (!p) {
2510 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002511 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002512 return NULL;
2513 }
2514 pool = tcp_md5sig_pool;
2515 if (pool) {
2516 /* oops, it has already been assigned. */
David S. Miller2c4f6212007-02-20 23:51:47 -08002517 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002518 __tcp_free_md5sig_pool(p);
2519 } else {
2520 tcp_md5sig_pool = pool = p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002521 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002522 }
2523 }
2524 return pool;
2525}
2526
2527EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2528
2529struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2530{
2531 struct tcp_md5sig_pool **p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002532 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002533 p = tcp_md5sig_pool;
2534 if (p)
2535 tcp_md5sig_users++;
David S. Miller2c4f6212007-02-20 23:51:47 -08002536 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002537 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2538}
2539
2540EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2541
David S. Miller6931ba72006-12-13 16:25:44 -08002542void __tcp_put_md5sig_pool(void)
2543{
2544 tcp_free_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002545}
2546
2547EXPORT_SYMBOL(__tcp_put_md5sig_pool);
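/* Usage note (derived from the code above): tcp_alloc_md5sig_pool() and
 * tcp_free_md5sig_pool() bump and drop tcp_md5sig_users, while per-packet
 * work brackets itself with __tcp_get_md5sig_pool(cpu) and
 * __tcp_put_md5sig_pool(). The bh-spinlock only guards the refcount and
 * pool pointer; the sleeping crypto_alloc_hash() runs outside it, which
 * is why the retry loop re-checks tcp_md5sig_pool after allocating.
 */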
2548#endif
2549
Andi Kleen4ac02ba2007-04-20 17:11:46 -07002550void tcp_done(struct sock *sk)
2551{
 2552	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2553 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
2554
2555 tcp_set_state(sk, TCP_CLOSE);
2556 tcp_clear_xmit_timers(sk);
2557
2558 sk->sk_shutdown = SHUTDOWN_MASK;
2559
2560 if (!sock_flag(sk, SOCK_DEAD))
2561 sk->sk_state_change(sk);
2562 else
2563 inet_csk_destroy_sock(sk);
2564}
2565EXPORT_SYMBOL_GPL(tcp_done);
2566
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002567extern struct tcp_congestion_ops tcp_reno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568
2569static __initdata unsigned long thash_entries;
2570static int __init set_thash_entries(char *str)
2571{
2572 if (!str)
2573 return 0;
2574 thash_entries = simple_strtoul(str, &str, 0);
2575 return 1;
2576}
2577__setup("thash_entries=", set_thash_entries);
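/* E.g. (illustrative): booting with "thash_entries=131072" on the kernel
 * command line fixes the established-hash size instead of letting
 * alloc_large_system_hash() scale it from available memory.
 */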
2578
2579void __init tcp_init(void)
2580{
2581 struct sk_buff *skb = NULL;
John Heffner7b4f4b52006-03-25 01:34:07 -08002582 unsigned long limit;
2583 int order, i, max_share;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584
Pavel Emelyanov1f9e6362007-12-11 02:12:04 -08002585 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002586
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002587 tcp_hashinfo.bind_bucket_cachep =
2588 kmem_cache_create("tcp_bind_bucket",
2589 sizeof(struct inet_bind_bucket), 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002590 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002591
Linus Torvalds1da177e2005-04-16 15:20:36 -07002592 /* Size and allocate the main established and bind bucket
2593 * hash tables.
2594 *
2595 * The methodology is similar to that of the buffer cache.
2596 */
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002597 tcp_hashinfo.ehash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598 alloc_large_system_hash("TCP established",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002599 sizeof(struct inet_ehash_bucket),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 thash_entries,
2601 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002602 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002603 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002604 &tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002605 NULL,
Jean Delvare0ccfe612007-10-30 00:59:25 -07002606 thash_entries ? 0 : 512 * 1024);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002607 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2608 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002609 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002610 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 }
Eric Dumazet230140c2007-11-07 02:40:20 -08002612 if (inet_ehash_locks_alloc(&tcp_hashinfo))
2613 panic("TCP: failed to alloc ehash_locks");
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002614 tcp_hashinfo.bhash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002615 alloc_large_system_hash("TCP bind",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002616 sizeof(struct inet_bind_hashbucket),
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002617 tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002618 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002619 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002620 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002621 &tcp_hashinfo.bhash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002622 NULL,
2623 64 * 1024);
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002624 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2625 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2626 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2627 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 }
2629
2630 /* Try to be a bit smarter and adjust defaults depending
2631 * on available memory.
2632 */
2633 for (order = 0; ((1 << order) << PAGE_SHIFT) <
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002634 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635 order++)
2636 ;
Andi Kleene7626482005-06-13 14:24:52 -07002637 if (order >= 4) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002638 tcp_death_row.sysctl_max_tw_buckets = 180000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 sysctl_tcp_max_orphans = 4096 << (order - 4);
2640 sysctl_max_syn_backlog = 1024;
2641 } else if (order < 3) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002642 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 sysctl_tcp_max_orphans >>= (3 - order);
2644 sysctl_max_syn_backlog = 128;
2645 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646
John Heffner53cdcc02007-03-16 15:04:03 -07002647 /* Set the pressure threshold to be a fraction of global memory that
2648 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2649 * memory, with a floor of 128 pages.
2650 */
2651 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2652 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2653 limit = max(limit, 128UL);
2654 sysctl_tcp_mem[0] = limit / 4 * 3;
2655 sysctl_tcp_mem[1] = limit;
John Heffner52bf3762006-11-14 20:25:17 -08002656 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
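	/* Worked example (illustrative, assuming 4 KB pages on a 256 MB
	 * machine, nr_all_pages == 65536): limit = 65536 >> 8 = 256, then
	 * (256 * 256) >> 1 = 32768 pages, so sysctl_tcp_mem becomes
	 * { 24576, 32768, 49152 } pages = { 96, 128, 192 } MB.
	 */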
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
John Heffner53cdcc02007-03-16 15:04:03 -07002658 /* Set per-socket limits to no more than 1/128 the pressure threshold */
John Heffner7b4f4b52006-03-25 01:34:07 -08002659 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2660 max_share = min(4UL*1024*1024, limit);
2661
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002662 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
John Heffner7b4f4b52006-03-25 01:34:07 -08002663 sysctl_tcp_wmem[1] = 16*1024;
2664 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2665
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002666 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
John Heffner7b4f4b52006-03-25 01:34:07 -08002667 sysctl_tcp_rmem[1] = 87380;
2668 sysctl_tcp_rmem[2] = max(87380, max_share);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669
2670 printk(KERN_INFO "TCP: Hash tables configured "
2671 "(established %d bind %d)\n",
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002672 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002673
2674 tcp_register_congestion_control(&tcp_reno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675}
2676
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677EXPORT_SYMBOL(tcp_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678EXPORT_SYMBOL(tcp_disconnect);
2679EXPORT_SYMBOL(tcp_getsockopt);
2680EXPORT_SYMBOL(tcp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681EXPORT_SYMBOL(tcp_poll);
2682EXPORT_SYMBOL(tcp_read_sock);
2683EXPORT_SYMBOL(tcp_recvmsg);
2684EXPORT_SYMBOL(tcp_sendmsg);
Jens Axboe9c55e012007-11-06 23:30:13 -08002685EXPORT_SYMBOL(tcp_splice_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002686EXPORT_SYMBOL(tcp_sendpage);
2687EXPORT_SYMBOL(tcp_setsockopt);
2688EXPORT_SYMBOL(tcp_shutdown);
2689EXPORT_SYMBOL(tcp_statistics);