/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	$Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed where wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *		Charles Hedrick :	TCP fixes
 *		Toomas Tamm	:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *		Charles Hedrick :	Rewrote most of it to actually work
 *		Linus		:	Rewrote tcp_read() and URG handling
 *					completely
 *		Gerhard Koerting:	Fixed some missing timer handling
 *		Matthew Dillon  :	Reworked TCP machine states as per RFC
 *		Gerhard Koerting:	PC/TCP workarounds
 *		Adam Caldwell	:	Assorted timer/timing errors
 *		Matthew Dillon	:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *		Arnt Gulbrandsen:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *		Michael Riepe	:	Bug in tcp_check()
 *		Matt Dillon	:	More TCP improvements and RST bug fixes
 *		Matt Dillon	:	Yet more small nasties removed from the
 *					TCP code (be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *		Michael Pall	:	Handle poll() after URG properly in
 *					all cases.
 *		Michael Pall	:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *		Michael Pall	:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *		Michael Pall	:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *		Yury Shevchuk	:	Really fixed accept() blocking problem.
 *		Craig I. Hagan  :	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *		Ross Biro	:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *		A.N.Kuznetsov	:	Don't time wait on completion of tidy
 *					close.
 *		Linus Torvalds	:	Fin/Shutdown & copied_seq changes.
 *		Linus Torvalds	:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *		Jorge Cwik	:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *		Matt Day	:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *		Marc Tamsky	:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *		Mark Yarvis	:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *		Marc Tamsky	:	Closing in closing fixes.
 *		Mike Shaver	:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *		Alexey Kuznetsov:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *		Marc Tamsky	:	TCP_CLOSE fix.
 *		Colin (G3TNE)	:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *		Pedro Roque	:	Better TCP window handling, delayed ack.
 *		Joerg Reuter	:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *		Eric Schenk	:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *		Keith Owens	:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *		Eric Schenk	:	Fix fast close down bug with
 *					shutdown() followed by close().
 *		Andi Kleen	:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING.  Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown.  There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */
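
/*
 * Illustrative sketch (not part of the original source): how a typical
 * active close, driven from user space, walks the states above.
 * Assumes a client fd; error handling omitted.
 *
 *	int fd = socket(AF_INET, SOCK_STREAM, 0);
 *	connect(fd, addr, addrlen);	// SYN_SENT, then ESTABLISHED
 *	write(fd, buf, len);		// still ESTABLISHED
 *	shutdown(fd, SHUT_WR);		// our FIN: FIN_WAIT1, then FIN_WAIT2
 *	while (read(fd, buf, sizeof(buf)) > 0)
 *		;			// drain until peer's FIN (read() == 0)
 *	close(fd);			// TIME_WAIT, eventually CLOSE
 */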

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;	/* Current allocated memory. */
atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	struct tcp_sock *tp = tcp_sk(sk);

	poll_wait(file, sk->sk_sleep, wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;
	if (sk->sk_err)
		mask = POLLERR;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awakened
		 * in SYN_* states. */
		if ((tp->rcv_nxt != tp->copied_seq) &&
		    (tp->urg_seq != tp->copied_seq ||
		     tp->rcv_nxt != tp->copied_seq + 1 ||
		     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		}

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	return mask;
}
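
/*
 * Illustrative sketch (not part of the original source): how the mask
 * computed above is typically consumed from user space.  Assumes a
 * connected fd; error handling omitted.
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLOUT };
 *
 *	if (poll(&pfd, 1, -1) > 0) {
 *		if (pfd.revents & (POLLERR | POLLHUP))
 *			;	// error, or both directions shut down
 *		if (pfd.revents & POLLIN)
 *			;	// data readable (or FIN: read() returns 0)
 *		if (pfd.revents & POLLOUT)
 *			;	// write space above the low-water mark
 *	}
 */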

int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			if (answ && !skb_queue_empty(&sk->sk_receive_queue))
				answ -=
		       tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
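
/*
 * Illustrative sketch (not part of the original source): querying these
 * counters from user space.  SIOCINQ (a.k.a. FIONREAD) reports unread
 * bytes in the receive queue; SIOCOUTQ (a.k.a. TIOCOUTQ) reports bytes
 * written but not yet acknowledged by the peer.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/sockios.h>
 *
 *	int inq, outq;
 *	ioctl(fd, SIOCINQ, &inq);
 *	ioctl(fd, SIOCOUTQ, &outq);
 */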

static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline int forced_push(struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum    = 0;
	tcb->seq     = tcb->end_seq = tp->write_seq;
	tcb->flags   = TCPCB_FLAG_ACK;
	tcb->sacked  = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
				struct sk_buff *skb)
{
	if (flags & MSG_OOB) {
		tp->urg_mode = 1;
		tp->snd_up = tp->write_seq;
	}
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_send_head(sk)) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, skb);
		tcp_mark_urg(tp, flags, skb);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;

	return skb_splice_bits(skb, offset, tss->pipe, tss->len, tss->flags);
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 *  @sock:	socket to splice from
 *  @ppos:	position (not valid)
 *  @pipe:	pipe to splice to
 *  @len:	number of bytes to splice
 *  @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, flags & SPLICE_F_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (flags & SPLICE_F_NONBLOCK) {
				ret = -EAGAIN;
				break;
			}
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when user tries to read
				 * from never connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) || !timeo ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
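
/*
 * Illustrative sketch (not part of the original source): relaying data
 * from a TCP socket to a file through this path, as user space would
 * drive it with splice(2).  Assumes a connected sock_fd and an open
 * file_fd; error and short-transfer handling omitted.
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	ssize_t n = splice(sock_fd, NULL, pfd[1], NULL, 65536,
 *			   SPLICE_F_MOVE);	// ends up in tcp_splice_read()
 *	splice(pfd[0], NULL, file_fd, NULL, n, SPLICE_F_MOVE);
 */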

struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned.  */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb_reserve(skb, skb_tailroom(skb) - size);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure();
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i, can_coalesce;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_shinfo(skb)->frags[i - 1].size += copy;
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		if (copied)
			tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
		size_goal = tp->xmit_size_goal;
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
		     size_t size, int flags)
{
	ssize_t res;
	struct sock *sk = sock->sk;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sock, page, offset, size, flags);

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return res;
}
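
/*
 * Illustrative sketch (not part of the original source): tcp_sendpage()
 * is where a user-space sendfile(2) to a TCP socket typically lands when
 * the route supports scatter-gather and checksum offload (see the
 * NETIF_F_SG / NETIF_F_ALL_CSUM test above); otherwise the slower
 * sock_no_sendpage() copy path is used.  Error handling omitted.
 *
 *	off_t off = 0;
 *	sendfile(sock_fd, file_fd, &off, count);  // file pages queued, no copy
 */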

#define TCP_PAGE(sk)	(sk->sk_sndmsg_page)
#define TCP_OFF(sk)	(sk->sk_sndmsg_off)

static inline int select_size(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sk->sk_route_caps & NETIF_F_SG) {
		if (sk_can_gso(sk))
			tmp = 0;
		else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t size)
{
	struct sock *sk = sock->sk;
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags;
	int mss_now, size_goal;
	int err, copied;
	long timeo;

	lock_sock(sk);
	TCP_CHECK_TIMER(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
	size_goal = tp->xmit_size_goal;

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto do_error;

	while (--iovlen >= 0) {
		int seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy;

			skb = tcp_write_queue_tail(sk);

			if (!tcp_send_head(sk) ||
			    (copy = size_goal - skb->len) <= 0) {

new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk, select_size(sk),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_tailroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				if (copy > skb_tailroom(skb))
					copy = skb_tailroom(skb);
				if ((err = skb_add_data(skb, from, copy)) != 0)
					goto do_fault;
			} else {
				int merge = 0;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = TCP_PAGE(sk);
				int off = TCP_OFF(sk);

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = 1;
				} else if (i == MAX_SKB_FRAGS ||
					   (!i &&
					   !(sk->sk_route_caps & NETIF_F_SG))) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						TCP_PAGE(sk) = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page(sk, from, skb, page,
						       off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!TCP_PAGE(sk)) {
						TCP_PAGE(sk) = page;
						TCP_OFF(sk) = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_shinfo(skb)->frags[i - 1].size +=
									copy;
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (TCP_PAGE(sk)) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						TCP_PAGE(sk) = page;
					}
				}

				TCP_OFF(sk) = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < size_goal || (flags & MSG_OOB))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied)
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
			size_goal = tp->xmit_size_goal;
		}
	}

out:
	if (copied)
		tcp_push(sk, flags, mss_now, tp->nonagle);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	TCP_CHECK_TIMER(sk);
	release_sock(sk);
	return err;
}

/*
 * Handle reading urgent data. BSD has very simple semantics for
 * this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
			struct msghdr *msg, int len, int flags,
			int *addr_len)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <pall@rz.uni-karlsruhe.de>
	 */
	return -EAGAIN;
}
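
/*
 * Illustrative sketch (not part of the original source): the semantics
 * above as seen from user space.  The call never blocks and returns at
 * most one byte of urgent data.  Error handling omitted.
 *
 *	char c;
 *	ssize_t n = recv(fd, &c, 1, MSG_OOB);
 *	// n == 1: the urgent byte; n < 0 with errno EAGAIN: none has
 *	// arrived yet; EINVAL: already consumed, or SO_OOBINLINE is set
 */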

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int time_to_ack = 0;

#if TCP_DEBUG
	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		   /* Delayed ACKs frequently hit locked sockets during bulk
		    * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		      !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = 1;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2*rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = 1;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk->sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *	- It is assumed that the socket was locked by the caller.
 *	- The routine does not block.
 *	- At present, there is no support for reading OOB data
 *	  or for 'peeking' the socket using this routine
 *	  (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 seq = tp->copied_seq;
	u32 offset;
	int copied = 0;

	if (sk->sk_state == TCP_LISTEN)
		return -ENOTCONN;
	while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
		if (offset < skb->len) {
			int used;
			size_t len;

			len = skb->len - offset;
			/* Stop reading if we hit a patch of urgent data */
			if (tp->urg_data) {
				u32 urg_offset = tp->urg_seq - seq;
				if (urg_offset < len)
					len = urg_offset;
				if (!len)
					break;
			}
			used = recv_actor(desc, skb, offset, len);
			if (used < 0) {
				if (!copied)
					copied = used;
				break;
			} else if (used <= len) {
				seq += used;
				copied += used;
				offset += used;
			}
			/*
			 * If recv_actor drops the lock (e.g. TCP splice
			 * receive) the skb pointer might be invalid when
			 * getting here: tcp_collapse might have deleted it
			 * while aggregating skbs from the socket queue.
			 */
			skb = tcp_recv_skb(sk, seq-1, &offset);
			if (!skb || (offset+1 != skb->len))
				break;
		}
		if (tcp_hdr(skb)->fin) {
			sk_eat_skb(sk, skb, 0);
			++seq;
			break;
		}
		sk_eat_skb(sk, skb, 0);
		if (!desc->count)
			break;
	}
	tp->copied_seq = seq;

	tcp_rcv_space_adjust(sk);

	/* Clean up data we have read: This will do ACK frames. */
	if (copied > 0)
		tcp_cleanup_rbuf(sk, copied);
	return copied;
}
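
/*
 * Illustrative sketch (not part of the original source): a minimal
 * recv_actor for tcp_read_sock() that just counts bytes; desc->count
 * bounds the total read.  Caller must hold the socket lock.
 *
 *	static int count_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *			       unsigned int offset, size_t len)
 *	{
 *		size_t *total = desc->arg.data;
 *
 *		if (len > desc->count)
 *			len = desc->count;
 *		desc->count -= len;	// stop once the quota is consumed
 *		*total += len;
 *		return len;		// bytes "used" from this skb
 *	}
 *
 *	size_t total = 0;
 *	read_descriptor_t desc = { .arg.data = &total, .count = to_read };
 *	lock_sock(sk);
 *	tcp_read_sock(sk, &desc, count_actor);
 *	release_sock(sk);
 */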
1258
1259/*
1260 * This routine copies from a sock struct into the user buffer.
1261 *
1262 * Technical note: in 2.3 we work on _locked_ socket, so that
1263 * tricks with *seq access order and skb->users are not required.
1264 * Probably, code can be easily improved even more.
1265 */
1266
1267int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1268 size_t len, int nonblock, int flags, int *addr_len)
1269{
1270 struct tcp_sock *tp = tcp_sk(sk);
1271 int copied = 0;
1272 u32 peek_seq;
1273 u32 *seq;
1274 unsigned long used;
1275 int err;
1276 int target; /* Read at least this many bytes */
1277 long timeo;
1278 struct task_struct *user_recv = NULL;
Chris Leech1a2449a2006-05-23 18:05:53 -07001279 int copied_early = 0;
Chris Leech2b1244a2007-03-08 09:57:36 -08001280 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281
1282 lock_sock(sk);
1283
1284 TCP_CHECK_TIMER(sk);
1285
1286 err = -ENOTCONN;
1287 if (sk->sk_state == TCP_LISTEN)
1288 goto out;
1289
1290 timeo = sock_rcvtimeo(sk, nonblock);
1291
1292 /* Urgent data needs to be handled specially. */
1293 if (flags & MSG_OOB)
1294 goto recv_urg;
1295
1296 seq = &tp->copied_seq;
1297 if (flags & MSG_PEEK) {
1298 peek_seq = tp->copied_seq;
1299 seq = &peek_seq;
1300 }
1301
1302 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1303
Chris Leech1a2449a2006-05-23 18:05:53 -07001304#ifdef CONFIG_NET_DMA
1305 tp->ucopy.dma_chan = NULL;
1306 preempt_disable();
Chris Leech2b1244a2007-03-08 09:57:36 -08001307 skb = skb_peek_tail(&sk->sk_receive_queue);
Andrew Mortone00c5d82007-03-08 09:57:36 -08001308 {
1309 int available = 0;
1310
1311 if (skb)
1312 available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1313 if ((available < target) &&
1314 (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1315 !sysctl_tcp_low_latency &&
1316 __get_cpu_var(softnet_data).net_dma) {
1317 preempt_enable_no_resched();
1318 tp->ucopy.pinned_list =
1319 dma_pin_iovec_pages(msg->msg_iov, len);
1320 } else {
1321 preempt_enable_no_resched();
1322 }
1323 }
Chris Leech1a2449a2006-05-23 18:05:53 -07001324#endif
1325
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 do {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001327 u32 offset;
1328
1329 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1330 if (tp->urg_data && tp->urg_seq == *seq) {
1331 if (copied)
1332 break;
1333 if (signal_pending(current)) {
1334 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1335 break;
1336 }
1337 }
1338
1339 /* Next get a buffer. */
1340
1341 skb = skb_peek(&sk->sk_receive_queue);
1342 do {
1343 if (!skb)
1344 break;
1345
1346 /* Now that we have two receive queues this
1347 * shouldn't happen.
1348 */
1349 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1350 printk(KERN_INFO "recvmsg bug: copied %X "
1351 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1352 break;
1353 }
1354 offset = *seq - TCP_SKB_CB(skb)->seq;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001355 if (tcp_hdr(skb)->syn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 offset--;
1357 if (offset < skb->len)
1358 goto found_ok_skb;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001359 if (tcp_hdr(skb)->fin)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 goto found_fin_ok;
1361 BUG_TRAP(flags & MSG_PEEK);
1362 skb = skb->next;
1363 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1364
1365 /* Well, if we have backlog, try to process it now yet. */
1366
1367 if (copied >= target && !sk->sk_backlog.tail)
1368 break;
1369
1370 if (copied) {
1371 if (sk->sk_err ||
1372 sk->sk_state == TCP_CLOSE ||
1373 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1374 !timeo ||
1375 signal_pending(current) ||
1376 (flags & MSG_PEEK))
1377 break;
1378 } else {
1379 if (sock_flag(sk, SOCK_DONE))
1380 break;
1381
1382 if (sk->sk_err) {
1383 copied = sock_error(sk);
1384 break;
1385 }
1386
1387 if (sk->sk_shutdown & RCV_SHUTDOWN)
1388 break;
1389
1390 if (sk->sk_state == TCP_CLOSE) {
1391 if (!sock_flag(sk, SOCK_DONE)) {
1392 /* This occurs when user tries to read
1393 * from never connected socket.
1394 */
1395 copied = -ENOTCONN;
1396 break;
1397 }
1398 break;
1399 }
1400
1401 if (!timeo) {
1402 copied = -EAGAIN;
1403 break;
1404 }
1405
1406 if (signal_pending(current)) {
1407 copied = sock_intr_errno(timeo);
1408 break;
1409 }
1410 }
1411
Chris Leech0e4b4992006-05-23 18:00:16 -07001412 tcp_cleanup_rbuf(sk, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413
David S. Miller7df55122005-06-18 23:01:10 -07001414 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001415 /* Install new reader */
1416 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1417 user_recv = current;
1418 tp->ucopy.task = user_recv;
1419 tp->ucopy.iov = msg->msg_iov;
1420 }
1421
1422 tp->ucopy.len = len;
1423
1424 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1425 (flags & (MSG_PEEK | MSG_TRUNC)));
1426
1427 /* Ugly... If prequeue is not empty, we have to
1428 * process it before releasing socket, otherwise
1429 * order will be broken at second iteration.
1430 * More elegant solution is required!!!
1431 *
1432 * Look: we have the following (pseudo)queues:
1433 *
1434 * 1. packets in flight
1435 * 2. backlog
1436 * 3. prequeue
1437 * 4. receive_queue
1438 *
1439 * Each queue can be processed only if the next ones
1440 * are empty. At this point we have empty receive_queue.
1441 * But prequeue _can_ be not empty after 2nd iteration,
1442 * when we jumped to start of loop because backlog
1443 * processing added something to receive_queue.
1444 * We cannot release_sock(), because backlog contains
1445 * packets arrived _after_ prequeued ones.
1446 *
1447 * Shortly, algorithm is clear --- to process all
1448 * the queues in order. We could make it more directly,
1449 * requeueing packets from backlog to prequeue, if
1450 * is not empty. It is more elegant, but eats cycles,
1451 * unfortunately.
1452 */
David S. Millerb03efcf2005-07-08 14:57:23 -07001453 if (!skb_queue_empty(&tp->ucopy.prequeue))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454 goto do_prequeue;
1455
1456 /* __ Set realtime policy in scheduler __ */
1457 }
1458
1459 if (copied >= target) {
1460 /* Do not sleep, just process backlog. */
1461 release_sock(sk);
1462 lock_sock(sk);
1463 } else
1464 sk_wait_data(sk, &timeo);
1465
Chris Leech1a2449a2006-05-23 18:05:53 -07001466#ifdef CONFIG_NET_DMA
1467 tp->ucopy.wakeup = 0;
1468#endif
1469
Linus Torvalds1da177e2005-04-16 15:20:36 -07001470 if (user_recv) {
1471 int chunk;
1472
1473 /* __ Restore normal policy in scheduler __ */
1474
1475 if ((chunk = len - tp->ucopy.len) != 0) {
1476 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1477 len -= chunk;
1478 copied += chunk;
1479 }
1480
1481 if (tp->rcv_nxt == tp->copied_seq &&
David S. Millerb03efcf2005-07-08 14:57:23 -07001482 !skb_queue_empty(&tp->ucopy.prequeue)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001483do_prequeue:
1484 tcp_prequeue_process(sk);
1485
1486 if ((chunk = len - tp->ucopy.len) != 0) {
1487 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1488 len -= chunk;
1489 copied += chunk;
1490 }
1491 }
1492 }
1493 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1494 if (net_ratelimit())
1495 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
Pavel Emelyanovba25f9d2007-10-18 23:40:40 -07001496 current->comm, task_pid_nr(current));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 peek_seq = tp->copied_seq;
1498 }
1499 continue;
1500
1501 found_ok_skb:
1502 /* Ok so how much can we use? */
1503 used = skb->len - offset;
1504 if (len < used)
1505 used = len;
1506
1507 /* Do we have urgent data here? */
1508 if (tp->urg_data) {
1509 u32 urg_offset = tp->urg_seq - *seq;
1510 if (urg_offset < used) {
1511 if (!urg_offset) {
1512 if (!sock_flag(sk, SOCK_URGINLINE)) {
1513 ++*seq;
1514 offset++;
1515 used--;
1516 if (!used)
1517 goto skip_copy;
1518 }
1519 } else
1520 used = urg_offset;
1521 }
1522 }
1523
1524 if (!(flags & MSG_TRUNC)) {
Chris Leech1a2449a2006-05-23 18:05:53 -07001525#ifdef CONFIG_NET_DMA
1526 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1527 tp->ucopy.dma_chan = get_softnet_dma();
1528
1529 if (tp->ucopy.dma_chan) {
1530 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1531 tp->ucopy.dma_chan, skb, offset,
1532 msg->msg_iov, used,
1533 tp->ucopy.pinned_list);
1534
1535 if (tp->ucopy.dma_cookie < 0) {
1536
1537 printk(KERN_ALERT "dma_cookie < 0\n");
1538
1539 /* Exception. Bailout! */
1540 if (!copied)
1541 copied = -EFAULT;
1542 break;
1543 }
1544 if ((offset + used) == skb->len)
1545 copied_early = 1;
1546
1547 } else
1548#endif
1549 {
1550 err = skb_copy_datagram_iovec(skb, offset,
1551 msg->msg_iov, used);
1552 if (err) {
1553 /* Exception. Bailout! */
1554 if (!copied)
1555 copied = -EFAULT;
1556 break;
1557 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 }
1559 }
1560
1561 *seq += used;
1562 copied += used;
1563 len -= used;
1564
1565 tcp_rcv_space_adjust(sk);
1566
1567skip_copy:
1568 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1569 tp->urg_data = 0;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07001570 tcp_fast_path_check(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001571 }
1572 if (used + offset < skb->len)
1573 continue;
1574
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001575 if (tcp_hdr(skb)->fin)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 goto found_fin_ok;
Chris Leech1a2449a2006-05-23 18:05:53 -07001577 if (!(flags & MSG_PEEK)) {
1578 sk_eat_skb(sk, skb, copied_early);
1579 copied_early = 0;
1580 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 continue;
1582
1583 found_fin_ok:
1584 /* Process the FIN. */
1585 ++*seq;
Chris Leech1a2449a2006-05-23 18:05:53 -07001586 if (!(flags & MSG_PEEK)) {
1587 sk_eat_skb(sk, skb, copied_early);
1588 copied_early = 0;
1589 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590 break;
1591 } while (len > 0);
1592
1593 if (user_recv) {
David S. Millerb03efcf2005-07-08 14:57:23 -07001594 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001595 int chunk;
1596
1597 tp->ucopy.len = copied > 0 ? len : 0;
1598
1599 tcp_prequeue_process(sk);
1600
1601 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1602 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1603 len -= chunk;
1604 copied += chunk;
1605 }
1606 }
1607
1608 tp->ucopy.task = NULL;
1609 tp->ucopy.len = 0;
1610 }
1611
Chris Leech1a2449a2006-05-23 18:05:53 -07001612#ifdef CONFIG_NET_DMA
1613 if (tp->ucopy.dma_chan) {
Chris Leech1a2449a2006-05-23 18:05:53 -07001614 dma_cookie_t done, used;
1615
1616 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1617
1618 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001619 tp->ucopy.dma_cookie, &done,
1620 &used) == DMA_IN_PROGRESS) {
Chris Leech1a2449a2006-05-23 18:05:53 -07001621 /* do partial cleanup of sk_async_wait_queue */
1622 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1623 (dma_async_is_complete(skb->dma_cookie, done,
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09001624 used) == DMA_SUCCESS)) {
Chris Leech1a2449a2006-05-23 18:05:53 -07001625 __skb_dequeue(&sk->sk_async_wait_queue);
1626 kfree_skb(skb);
1627 }
1628 }
1629
1630 /* Safe to free early-copied skbs now */
1631 __skb_queue_purge(&sk->sk_async_wait_queue);
1632 dma_chan_put(tp->ucopy.dma_chan);
1633 tp->ucopy.dma_chan = NULL;
1634 }
1635 if (tp->ucopy.pinned_list) {
1636 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1637 tp->ucopy.pinned_list = NULL;
1638 }
1639#endif
1640
Linus Torvalds1da177e2005-04-16 15:20:36 -07001641 /* According to UNIX98, msg_name/msg_namelen are ignored
1642	 * on a connected socket. I was just happy when I found this 8) --ANK
1643 */
1644
1645 /* Clean up data we have read: This will do ACK frames. */
Chris Leech0e4b4992006-05-23 18:00:16 -07001646 tcp_cleanup_rbuf(sk, copied);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001647
1648 TCP_CHECK_TIMER(sk);
1649 release_sock(sk);
1650 return copied;
1651
1652out:
1653 TCP_CHECK_TIMER(sk);
1654 release_sock(sk);
1655 return err;
1656
1657recv_urg:
1658 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1659 goto out;
1660}
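
/*
 * Example: the MSG_PEEK warning above fires when two readers peek the
 * same socket concurrently and copied_seq moves underneath one of them.
 * Below is a hedged userspace sketch of the legitimate single-reader
 * peek-then-consume pattern; it assumes nothing beyond the standard
 * sockets API and is an illustration, not part of this file.
 */
#if 0	/* userspace illustration */
#include <sys/socket.h>
#include <unistd.h>

static ssize_t peek_then_read(int fd, char *buf, size_t len)
{
	/* MSG_PEEK copies data without advancing copied_seq. */
	ssize_t n = recv(fd, buf, len, MSG_PEEK);

	if (n <= 0)
		return n;
	/* Consume what we just peeked; the data is still queued. */
	return recv(fd, buf, (size_t)n, 0);
}
#endif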
1661
Ilpo Järvinen490d5042008-01-12 03:17:20 -08001662void tcp_set_state(struct sock *sk, int state)
1663{
1664 int oldstate = sk->sk_state;
1665
1666 switch (state) {
1667 case TCP_ESTABLISHED:
1668 if (oldstate != TCP_ESTABLISHED)
1669 TCP_INC_STATS(TCP_MIB_CURRESTAB);
1670 break;
1671
1672 case TCP_CLOSE:
1673 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1674 TCP_INC_STATS(TCP_MIB_ESTABRESETS);
1675
1676 sk->sk_prot->unhash(sk);
1677 if (inet_csk(sk)->icsk_bind_hash &&
1678 !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
Arnaldo Carvalho de Meloab1e0a12008-02-03 04:06:04 -08001679 inet_put_port(sk);
Ilpo Järvinen490d5042008-01-12 03:17:20 -08001680 /* fall through */
1681 default:
1682		if (oldstate == TCP_ESTABLISHED)
1683 TCP_DEC_STATS(TCP_MIB_CURRESTAB);
1684 }
1685
1686	/* Change state AFTER the socket is unhashed to avoid a closed
1687	 * socket sitting in the hash tables.
1688 */
1689 sk->sk_state = state;
1690
1691#ifdef STATE_TRACE
1692	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
1693#endif
1694}
1695EXPORT_SYMBOL_GPL(tcp_set_state);
1696
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697/*
1698 * State processing on a close. This implements the state shift for
1699 * sending our FIN frame. Note that we only send a FIN for some
1700 * states. A shutdown() may have already sent the FIN, or we may be
1701 * closed.
1702 */
1703
Arjan van de Ven9b5b5cf2005-11-29 16:21:38 -08001704static const unsigned char new_state[16] = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 /* current state: new state: action: */
1706 /* (Invalid) */ TCP_CLOSE,
1707 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1708 /* TCP_SYN_SENT */ TCP_CLOSE,
1709 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1710 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1711 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1712 /* TCP_TIME_WAIT */ TCP_CLOSE,
1713 /* TCP_CLOSE */ TCP_CLOSE,
1714 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1715 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1716 /* TCP_LISTEN */ TCP_CLOSE,
1717 /* TCP_CLOSING */ TCP_CLOSING,
1718};
1719
1720static int tcp_close_state(struct sock *sk)
1721{
1722 int next = (int)new_state[sk->sk_state];
1723 int ns = next & TCP_STATE_MASK;
1724
1725 tcp_set_state(sk, ns);
1726
1727 return next & TCP_ACTION_FIN;
1728}
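
/*
 * Worked example of the new_state[] encoding above: each entry packs the
 * successor state in the low bits (TCP_STATE_MASK) plus an optional
 * TCP_ACTION_FIN flag. For a socket in TCP_ESTABLISHED:
 *
 *	next = TCP_FIN_WAIT1 | TCP_ACTION_FIN
 *	ns   = next & TCP_STATE_MASK	-> TCP_FIN_WAIT1
 *	next & TCP_ACTION_FIN		-> nonzero, so tcp_close() and
 *					   tcp_shutdown() queue a FIN
 *
 * whereas TCP_FIN_WAIT2 maps to itself with no action, since our FIN
 * has already been sent.
 */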
1729
1730/*
1731 * Shutdown the sending side of a connection. Much like close except
Satoru SATOH1f29b052008-04-21 02:27:58 -07001732 * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733 */
1734
1735void tcp_shutdown(struct sock *sk, int how)
1736{
1737 /* We need to grab some memory, and put together a FIN,
1738 * and then put it into the queue to be sent.
1739 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1740 */
1741 if (!(how & SEND_SHUTDOWN))
1742 return;
1743
1744 /* If we've already sent a FIN, or it's a closed state, skip this. */
1745 if ((1 << sk->sk_state) &
1746 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1747 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1748		/* Clear out any half-completed packets.  FIN if needed. */
1749 if (tcp_close_state(sk))
1750 tcp_send_fin(sk);
1751 }
1752}
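
/*
 * Example: the half-close implemented above, seen from userspace. A
 * hedged sketch using only the standard sockets API; shutdown(SHUT_WR)
 * queues our FIN while the receive side stays open, so the peer's
 * remaining data (and its FIN) can still be read.
 */
#if 0	/* userspace illustration */
#include <sys/socket.h>
#include <unistd.h>

static void half_close_and_drain(int fd)
{
	char buf[4096];

	shutdown(fd, SHUT_WR);		/* send FIN, keep receiving */
	while (read(fd, buf, sizeof(buf)) > 0)
		;			/* drain until the peer's FIN */
}
#endif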
1753
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754void tcp_close(struct sock *sk, long timeout)
1755{
1756 struct sk_buff *skb;
1757 int data_was_unread = 0;
Herbert Xu75c2d9072006-05-03 23:31:35 -07001758 int state;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759
1760 lock_sock(sk);
1761 sk->sk_shutdown = SHUTDOWN_MASK;
1762
1763 if (sk->sk_state == TCP_LISTEN) {
1764 tcp_set_state(sk, TCP_CLOSE);
1765
1766 /* Special case. */
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001767 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
1769 goto adjudge_to_death;
1770 }
1771
1772	/* We need to flush the receive buffers. We do this only on the
1773 * descriptor close, not protocol-sourced closes, because the
1774 * reader process may not have drained the data yet!
1775 */
1776 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1777 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07001778 tcp_hdr(skb)->fin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 data_was_unread += len;
1780 __kfree_skb(skb);
1781 }
1782
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001783 sk_mem_reclaim(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001784
Gerrit Renker65bb7232007-04-28 21:21:46 -07001785 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1786 * data was lost. To witness the awful effects of the old behavior of
1787 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1788 * GET in an FTP client, suspend the process, wait for the client to
1789 * advertise a zero window, then kill -9 the FTP client, wheee...
1790 * Note: timeout is always zero in such a case.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001791 */
1792 if (data_was_unread) {
1793 /* Unread data was tossed, zap the connection. */
1794 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1795 tcp_set_state(sk, TCP_CLOSE);
1796 tcp_send_active_reset(sk, GFP_KERNEL);
1797 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1798 /* Check zero linger _after_ checking for unread data. */
1799 sk->sk_prot->disconnect(sk, 0);
1800 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1801 } else if (tcp_close_state(sk)) {
1802 /* We FIN if the application ate all the data before
1803 * zapping the connection.
1804 */
1805
1806 /* RED-PEN. Formally speaking, we have broken TCP state
1807 * machine. State transitions:
1808 *
1809 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1810 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1811 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1812 *
1813 * are legal only when FIN has been sent (i.e. in window),
1814 * rather than queued out of window. Purists blame.
1815 *
1816		 * E.g. the "RFC state" is ESTABLISHED
1817		 * if the Linux state is FIN-WAIT-1 but the FIN is still not sent.
1818		 *
1819		 * The visible deviations are that we sometimes
1820		 * enter time-wait state when it is not really required
1821		 * (harmless), and do not send active resets when they are
1822		 * required by the specs (TCP_ESTABLISHED and TCP_CLOSE_WAIT, when
1823		 * they look like CLOSING or LAST_ACK to Linux).
1824		 * Probably I missed some more holelets.
1825 * --ANK
1826 */
1827 tcp_send_fin(sk);
1828 }
1829
1830 sk_stream_wait_close(sk, timeout);
1831
1832adjudge_to_death:
Herbert Xu75c2d9072006-05-03 23:31:35 -07001833 state = sk->sk_state;
1834 sock_hold(sk);
1835 sock_orphan(sk);
1836 atomic_inc(sk->sk_prot->orphan_count);
1837
Linus Torvalds1da177e2005-04-16 15:20:36 -07001838 /* It is the last release_sock in its life. It will remove backlog. */
1839 release_sock(sk);
1840
1841
1842 /* Now socket is owned by kernel and we acquire BH lock
1843 to finish close. No need to check for user refs.
1844 */
1845 local_bh_disable();
1846 bh_lock_sock(sk);
1847 BUG_TRAP(!sock_owned_by_user(sk));
1848
Herbert Xu75c2d9072006-05-03 23:31:35 -07001849 /* Have we already been destroyed by a softirq or backlog? */
1850 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1851 goto out;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001852
1853	/* This is a (useful) BSD violation of the RFC. There is a
1854	 * problem with TCP as specified in that the other end could
1855	 * keep a socket open forever with no application left at this end.
1856	 * We use a 3 minute timeout (about the same as BSD) then kill
1857	 * our end. If they send after that then tough - BUT: long enough
1858	 * that we won't repeat the old "4*rto = almost no time - whoops,
1859	 * reset" mistake.
1860 *
1861	 * Nope, it was not a mistake. It is really desired behaviour,
1862	 * e.g. on HTTP servers, where such sockets are useless but
1863	 * consume significant resources. Let's do it with the special
1864	 * linger2 option. --ANK
1865 */
1866
1867 if (sk->sk_state == TCP_FIN_WAIT2) {
1868 struct tcp_sock *tp = tcp_sk(sk);
1869 if (tp->linger2 < 0) {
1870 tcp_set_state(sk, TCP_CLOSE);
1871 tcp_send_active_reset(sk, GFP_ATOMIC);
1872 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1873 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001874 const int tmo = tcp_fin_time(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875
1876 if (tmo > TCP_TIMEWAIT_LEN) {
David S. Miller52499af2006-07-31 22:32:09 -07001877 inet_csk_reset_keepalive_timer(sk,
1878 tmo - TCP_TIMEWAIT_LEN);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001879 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001880 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1881 goto out;
1882 }
1883 }
1884 }
1885 if (sk->sk_state != TCP_CLOSE) {
Hideo Aoki3ab224b2007-12-31 00:11:19 -08001886 sk_mem_reclaim(sk);
Pavel Emelianove4fd5da2007-05-29 13:19:18 -07001887 if (tcp_too_many_orphans(sk,
1888 atomic_read(sk->sk_prot->orphan_count))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889 if (net_ratelimit())
1890			printk(KERN_INFO "TCP: too many orphaned "
1891 "sockets\n");
1892 tcp_set_state(sk, TCP_CLOSE);
1893 tcp_send_active_reset(sk, GFP_ATOMIC);
1894 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1895 }
1896 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001897
1898 if (sk->sk_state == TCP_CLOSE)
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001899 inet_csk_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001900 /* Otherwise, socket is reprieved until protocol close. */
1901
1902out:
1903 bh_unlock_sock(sk);
1904 local_bh_enable();
1905 sock_put(sk);
1906}
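
/*
 * Example: the zero-linger branch above (disconnect plus RST instead of
 * an orderly FIN) is what userspace selects with SO_LINGER and
 * l_linger == 0. A hedged sketch:
 */
#if 0	/* userspace illustration */
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	/* With this set, close() resets the connection rather than
	 * sending a FIN and lingering in FIN-WAIT/TIME-WAIT. */
	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);
}
#endif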
1907
1908/* These states need RST on ABORT according to RFC793 */
1909
1910static inline int tcp_need_reset(int state)
1911{
1912 return (1 << state) &
1913 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1914 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1915}
1916
1917int tcp_disconnect(struct sock *sk, int flags)
1918{
1919 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001920 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001921 struct tcp_sock *tp = tcp_sk(sk);
1922 int err = 0;
1923 int old_state = sk->sk_state;
1924
1925 if (old_state != TCP_CLOSE)
1926 tcp_set_state(sk, TCP_CLOSE);
1927
1928 /* ABORT function of RFC793 */
1929 if (old_state == TCP_LISTEN) {
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001930 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931 } else if (tcp_need_reset(old_state) ||
1932 (tp->snd_nxt != tp->write_seq &&
1933 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001934		/* The last check adjusts for the discrepancy between Linux and
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935		 * RFC state machines.
1936		 */
1937 tcp_send_active_reset(sk, gfp_any());
1938 sk->sk_err = ECONNRESET;
1939 } else if (old_state == TCP_SYN_SENT)
1940 sk->sk_err = ECONNRESET;
1941
1942 tcp_clear_xmit_timers(sk);
1943 __skb_queue_purge(&sk->sk_receive_queue);
David S. Millerfe067e82007-03-07 12:12:44 -08001944 tcp_write_queue_purge(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001945 __skb_queue_purge(&tp->out_of_order_queue);
Chris Leech1a2449a2006-05-23 18:05:53 -07001946#ifdef CONFIG_NET_DMA
1947 __skb_queue_purge(&sk->sk_async_wait_queue);
1948#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001949
1950 inet->dport = 0;
1951
1952 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1953 inet_reset_saddr(sk);
1954
1955 sk->sk_shutdown = 0;
1956 sock_reset_flag(sk, SOCK_DONE);
1957 tp->srtt = 0;
1958 if ((tp->write_seq += tp->max_window + 2) == 0)
1959 tp->write_seq = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001960 icsk->icsk_backoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001961 tp->snd_cwnd = 2;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001962 icsk->icsk_probes_out = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001963 tp->packets_out = 0;
1964 tp->snd_ssthresh = 0x7fffffff;
1965 tp->snd_cwnd_cnt = 0;
Stephen Hemminger9772efb2005-11-10 17:09:53 -08001966 tp->bytes_acked = 0;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001967 tcp_set_ca_state(sk, TCP_CA_Open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968 tcp_clear_retrans(tp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001969 inet_csk_delack_init(sk);
David S. Millerfe067e82007-03-07 12:12:44 -08001970 tcp_init_send_head(sk);
Srinivas Ajib40b4f72007-05-03 17:32:28 -07001971 memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001972 __sk_dst_reset(sk);
1973
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001974 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001975
1976 sk->sk_error_report(sk);
1977 return err;
1978}
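
/*
 * Example: userspace reaches tcp_disconnect() by connect()ing an
 * established TCP socket to an address whose family is AF_UNSPEC, the
 * BSD convention for dissolving an association. A hedged sketch:
 */
#if 0	/* userspace illustration */
#include <string.h>
#include <sys/socket.h>

static int tcp_unconnect(int fd)
{
	struct sockaddr sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_family = AF_UNSPEC;
	return connect(fd, &sa, sizeof(sa));
}
#endif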
1979
1980/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 * Socket option code for TCP.
1982 */
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001983static int do_tcp_setsockopt(struct sock *sk, int level,
1984 int optname, char __user *optval, int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985{
1986 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001987 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001988 int val;
1989 int err = 0;
1990
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001991	/* This is a string value; all the others are ints. */
1992 if (optname == TCP_CONGESTION) {
1993 char name[TCP_CA_NAME_MAX];
1994
1995 if (optlen < 1)
1996 return -EINVAL;
1997
1998 val = strncpy_from_user(name, optval,
1999 min(TCP_CA_NAME_MAX-1, optlen));
2000 if (val < 0)
2001 return -EFAULT;
2002 name[val] = 0;
2003
2004 lock_sock(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002005 err = tcp_set_congestion_control(sk, name);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002006 release_sock(sk);
2007 return err;
2008 }
2009
Linus Torvalds1da177e2005-04-16 15:20:36 -07002010 if (optlen < sizeof(int))
2011 return -EINVAL;
2012
2013 if (get_user(val, (int __user *)optval))
2014 return -EFAULT;
2015
2016 lock_sock(sk);
2017
2018 switch (optname) {
2019 case TCP_MAXSEG:
2020		/* Values greater than the interface MTU won't take effect. However,
2021		 * at the point when this call is made we typically don't yet
2022		 * know which interface is going to be used. */
2023 if (val < 8 || val > MAX_TCP_WINDOW) {
2024 err = -EINVAL;
2025 break;
2026 }
2027 tp->rx_opt.user_mss = val;
2028 break;
2029
2030 case TCP_NODELAY:
2031 if (val) {
2032			/* TCP_NODELAY is weaker than TCP_CORK, so
2033			 * this option set on a corked socket is remembered, but
2034			 * it is not activated until the cork is cleared.
2035 *
2036 * However, when TCP_NODELAY is set we make
2037 * an explicit push, which overrides even TCP_CORK
2038 * for currently queued segments.
2039 */
2040			tp->nonagle |= TCP_NAGLE_OFF | TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002041 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 } else {
2043 tp->nonagle &= ~TCP_NAGLE_OFF;
2044 }
2045 break;
2046
2047 case TCP_CORK:
2048		/* When set, this indicates that non-full frames are always queued.
2049 * Later the user clears this option and we transmit
2050 * any pending partial frames in the queue. This is
2051 * meant to be used alongside sendfile() to get properly
2052 * filled frames when the user (for example) must write
2053 * out headers with a write() call first and then use
2054 * sendfile to send out the data parts.
2055 *
2056 * TCP_CORK can be set together with TCP_NODELAY and it is
2057 * stronger than TCP_NODELAY.
2058 */
2059 if (val) {
2060 tp->nonagle |= TCP_NAGLE_CORK;
2061 } else {
2062 tp->nonagle &= ~TCP_NAGLE_CORK;
2063			if (tp->nonagle & TCP_NAGLE_OFF)
2064 tp->nonagle |= TCP_NAGLE_PUSH;
Ilpo Järvinen9e412ba2007-04-20 22:18:02 -07002065 tcp_push_pending_frames(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 }
2067 break;
2068
2069 case TCP_KEEPIDLE:
2070 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2071 err = -EINVAL;
2072 else {
2073 tp->keepalive_time = val * HZ;
2074 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2075 !((1 << sk->sk_state) &
2076 (TCPF_CLOSE | TCPF_LISTEN))) {
2077 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2078 if (tp->keepalive_time > elapsed)
2079 elapsed = tp->keepalive_time - elapsed;
2080 else
2081 elapsed = 0;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002082 inet_csk_reset_keepalive_timer(sk, elapsed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 }
2084 }
2085 break;
2086 case TCP_KEEPINTVL:
2087 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2088 err = -EINVAL;
2089 else
2090 tp->keepalive_intvl = val * HZ;
2091 break;
2092 case TCP_KEEPCNT:
2093 if (val < 1 || val > MAX_TCP_KEEPCNT)
2094 err = -EINVAL;
2095 else
2096 tp->keepalive_probes = val;
2097 break;
2098 case TCP_SYNCNT:
2099 if (val < 1 || val > MAX_TCP_SYNCNT)
2100 err = -EINVAL;
2101 else
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002102 icsk->icsk_syn_retries = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 break;
2104
2105 case TCP_LINGER2:
2106 if (val < 0)
2107 tp->linger2 = -1;
2108 else if (val > sysctl_tcp_fin_timeout / HZ)
2109 tp->linger2 = 0;
2110 else
2111 tp->linger2 = val * HZ;
2112 break;
2113
2114 case TCP_DEFER_ACCEPT:
Patrick McManusec3c0982008-03-21 16:33:01 -07002115 if (val < 0) {
2116 err = -EINVAL;
2117 } else {
2118 if (val > MAX_TCP_ACCEPT_DEFERRED)
2119 val = MAX_TCP_ACCEPT_DEFERRED;
2120 icsk->icsk_accept_queue.rskq_defer_accept = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121 }
2122 break;
2123
2124 case TCP_WINDOW_CLAMP:
2125 if (!val) {
2126 if (sk->sk_state != TCP_CLOSE) {
2127 err = -EINVAL;
2128 break;
2129 }
2130 tp->window_clamp = 0;
2131 } else
2132 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2133 SOCK_MIN_RCVBUF / 2 : val;
2134 break;
2135
2136 case TCP_QUICKACK:
2137 if (!val) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002138 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002140 icsk->icsk_ack.pingpong = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 if ((1 << sk->sk_state) &
2142 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002143 inet_csk_ack_scheduled(sk)) {
2144 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
Chris Leech0e4b4992006-05-23 18:00:16 -07002145 tcp_cleanup_rbuf(sk, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 if (!(val & 1))
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002147 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 }
2149 }
2150 break;
2151
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002152#ifdef CONFIG_TCP_MD5SIG
2153 case TCP_MD5SIG:
2154 /* Read the IP->Key mappings from userspace */
2155 err = tp->af_specific->md5_parse(sk, optval, optlen);
2156 break;
2157#endif
2158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 default:
2160 err = -ENOPROTOOPT;
2161 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002162 }
2163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 release_sock(sk);
2165 return err;
2166}
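
/*
 * Example: the TCP_CORK pattern described in the comments above, seen
 * from userspace - cork, write the headers, sendfile() the body, then
 * uncork so any final partial frame is pushed. A hedged sketch; the
 * descriptor names are placeholders.
 */
#if 0	/* userspace illustration */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_response(int sock, const char *hdr, size_t hdrlen,
			  int filefd, size_t filelen)
{
	int on = 1, off = 0;

	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
	write(sock, hdr, hdrlen);		/* queued, not yet sent */
	sendfile(sock, filefd, NULL, filelen);	/* full frames only */
	setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
	/* clearing the cork pushes the remaining partial frame */
}
#endif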
2167
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002168int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2169 int optlen)
2170{
2171 struct inet_connection_sock *icsk = inet_csk(sk);
2172
2173 if (level != SOL_TCP)
2174 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2175 optval, optlen);
2176 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2177}
2178
2179#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002180int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2181 char __user *optval, int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002182{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002183 if (level != SOL_TCP)
2184 return inet_csk_compat_setsockopt(sk, level, optname,
2185 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002186 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2187}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002188
2189EXPORT_SYMBOL(compat_tcp_setsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002190#endif
2191
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192/* Return information about state of tcp endpoint in API format. */
2193void tcp_get_info(struct sock *sk, struct tcp_info *info)
2194{
2195 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002196 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 u32 now = tcp_time_stamp;
2198
2199 memset(info, 0, sizeof(*info));
2200
2201 info->tcpi_state = sk->sk_state;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002202 info->tcpi_ca_state = icsk->icsk_ca_state;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002203 info->tcpi_retransmits = icsk->icsk_retransmits;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002204 info->tcpi_probes = icsk->icsk_probes_out;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002205 info->tcpi_backoff = icsk->icsk_backoff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
2207 if (tp->rx_opt.tstamp_ok)
2208 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
Ilpo Järvinene60402d2007-08-09 15:14:46 +03002209 if (tcp_is_sack(tp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 info->tcpi_options |= TCPI_OPT_SACK;
2211 if (tp->rx_opt.wscale_ok) {
2212 info->tcpi_options |= TCPI_OPT_WSCALE;
2213 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2214 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
YOSHIFUJI Hideakie905a9e2007-02-09 23:24:47 +09002215 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
2217	if (tp->ecn_flags & TCP_ECN_OK)
2218 info->tcpi_options |= TCPI_OPT_ECN;
2219
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002220 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2221 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002222 info->tcpi_snd_mss = tp->mss_cache;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002223 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
Rick Jones5ee3afb2007-09-18 13:26:31 -07002225 if (sk->sk_state == TCP_LISTEN) {
2226 info->tcpi_unacked = sk->sk_ack_backlog;
2227 info->tcpi_sacked = sk->sk_max_ack_backlog;
2228 } else {
2229 info->tcpi_unacked = tp->packets_out;
2230 info->tcpi_sacked = tp->sacked_out;
2231 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 info->tcpi_lost = tp->lost_out;
2233 info->tcpi_retrans = tp->retrans_out;
2234 info->tcpi_fackets = tp->fackets_out;
2235
2236 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002237 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2239
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002240 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2242	info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
2243	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
2244 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2245 info->tcpi_snd_cwnd = tp->snd_cwnd;
2246 info->tcpi_advmss = tp->advmss;
2247 info->tcpi_reordering = tp->reordering;
2248
2249	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
2250 info->tcpi_rcv_space = tp->rcvq_space.space;
2251
2252 info->tcpi_total_retrans = tp->total_retrans;
2253}
2254
2255EXPORT_SYMBOL_GPL(tcp_get_info);
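
/*
 * Example: tcp_get_info() backs the TCP_INFO getsockopt; a hedged
 * userspace sketch reading a few of the fields filled in above
 * (tcpi_rtt and tcpi_rttvar are in microseconds).
 */
#if 0	/* userspace illustration */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void dump_tcp_info(int fd)
{
	struct tcp_info ti;
	socklen_t len = sizeof(ti);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
		printf("rtt %u rttvar %u cwnd %u retrans %u\n",
		       ti.tcpi_rtt, ti.tcpi_rttvar,
		       ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
}
#endif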
2256
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002257static int do_tcp_getsockopt(struct sock *sk, int level,
2258 int optname, char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259{
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002260 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 struct tcp_sock *tp = tcp_sk(sk);
2262 int val, len;
2263
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 if (get_user(len, optlen))
2265 return -EFAULT;
2266
2267 len = min_t(unsigned int, len, sizeof(int));
2268
2269 if (len < 0)
2270 return -EINVAL;
2271
2272 switch (optname) {
2273 case TCP_MAXSEG:
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002274 val = tp->mss_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2276 val = tp->rx_opt.user_mss;
2277 break;
2278 case TCP_NODELAY:
2279		val = !!(tp->nonagle & TCP_NAGLE_OFF);
2280 break;
2281 case TCP_CORK:
2282		val = !!(tp->nonagle & TCP_NAGLE_CORK);
2283 break;
2284 case TCP_KEEPIDLE:
2285 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2286 break;
2287 case TCP_KEEPINTVL:
2288 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2289 break;
2290 case TCP_KEEPCNT:
2291 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2292 break;
2293 case TCP_SYNCNT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002294 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295 break;
2296 case TCP_LINGER2:
2297 val = tp->linger2;
2298 if (val >= 0)
2299 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2300 break;
2301 case TCP_DEFER_ACCEPT:
Patrick McManusec3c0982008-03-21 16:33:01 -07002302 val = icsk->icsk_accept_queue.rskq_defer_accept;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303 break;
2304 case TCP_WINDOW_CLAMP:
2305 val = tp->window_clamp;
2306 break;
2307 case TCP_INFO: {
2308 struct tcp_info info;
2309
2310 if (get_user(len, optlen))
2311 return -EFAULT;
2312
2313 tcp_get_info(sk, &info);
2314
2315 len = min_t(unsigned int, len, sizeof(info));
2316 if (put_user(len, optlen))
2317 return -EFAULT;
2318 if (copy_to_user(optval, &info, len))
2319 return -EFAULT;
2320 return 0;
2321 }
2322 case TCP_QUICKACK:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002323 val = !icsk->icsk_ack.pingpong;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 break;
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002325
2326 case TCP_CONGESTION:
2327 if (get_user(len, optlen))
2328 return -EFAULT;
2329 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2330 if (put_user(len, optlen))
2331 return -EFAULT;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002332 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002333 return -EFAULT;
2334 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002335 default:
2336 return -ENOPROTOOPT;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002337 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002338
2339 if (put_user(len, optlen))
2340 return -EFAULT;
2341 if (copy_to_user(optval, &val, len))
2342 return -EFAULT;
2343 return 0;
2344}
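
/*
 * Example: the TCP_CONGESTION case above returns the current congestion
 * control algorithm's name as a string rather than an int. A hedged
 * userspace sketch:
 */
#if 0	/* userspace illustration */
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <stdio.h>
#include <sys/socket.h>

static void show_cong_ctl(int fd)
{
	char name[16];		/* TCP_CA_NAME_MAX */
	socklen_t len = sizeof(name);

	if (getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, name, &len) == 0)
		printf("congestion control: %.*s\n", (int)len, name);
}
#endif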
2345
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002346int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2347 int __user *optlen)
2348{
2349 struct inet_connection_sock *icsk = inet_csk(sk);
2350
2351 if (level != SOL_TCP)
2352 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2353 optval, optlen);
2354 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2355}
2356
2357#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002358int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2359 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002360{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002361 if (level != SOL_TCP)
2362 return inet_csk_compat_getsockopt(sk, level, optname,
2363 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002364 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2365}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002366
2367EXPORT_SYMBOL(compat_tcp_getsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002368#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
Herbert Xu576a30e2006-06-27 13:22:38 -07002370struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002371{
2372 struct sk_buff *segs = ERR_PTR(-EINVAL);
2373 struct tcphdr *th;
2374	unsigned int thlen;
2375 unsigned int seq;
Al Virod3bc23e2006-11-14 21:24:49 -08002376 __be32 delta;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002377 unsigned int oldlen;
2378 unsigned int len;
2379
2380 if (!pskb_may_pull(skb, sizeof(*th)))
2381 goto out;
2382
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002383 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002384 thlen = th->doff * 4;
2385 if (thlen < sizeof(*th))
2386 goto out;
2387
2388 if (!pskb_may_pull(skb, thlen))
2389 goto out;
2390
Herbert Xu0718bcc2006-06-25 23:55:46 -07002391 oldlen = (u16)~skb->len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002392 __skb_pull(skb, thlen);
2393
Herbert Xu3820c3f2006-06-29 20:11:25 -07002394 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2395 /* Packet is from an untrusted source, reset gso_segs. */
Herbert Xubbcf4672006-07-03 19:38:35 -07002396 int type = skb_shinfo(skb)->gso_type;
2397 int mss;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002398
Herbert Xubbcf4672006-07-03 19:38:35 -07002399 if (unlikely(type &
2400 ~(SKB_GSO_TCPV4 |
2401 SKB_GSO_DODGY |
2402 SKB_GSO_TCP_ECN |
2403 SKB_GSO_TCPV6 |
2404 0) ||
2405 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2406 goto out;
2407
2408 mss = skb_shinfo(skb)->gso_size;
Ilpo Järvinen172589c2007-08-28 15:50:33 -07002409 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss);
Herbert Xu3820c3f2006-06-29 20:11:25 -07002410
2411 segs = NULL;
2412 goto out;
2413 }
2414
Herbert Xu576a30e2006-06-27 13:22:38 -07002415 segs = skb_segment(skb, features);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002416 if (IS_ERR(segs))
2417 goto out;
2418
2419 len = skb_shinfo(skb)->gso_size;
Herbert Xu0718bcc2006-06-25 23:55:46 -07002420 delta = htonl(oldlen + (thlen + len));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002421
2422 skb = segs;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002423 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002424 seq = ntohl(th->seq);
2425
2426 do {
2427 th->fin = th->psh = 0;
2428
Al Virod3bc23e2006-11-14 21:24:49 -08002429 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2430 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002431 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002432 th->check =
2433 csum_fold(csum_partial(skb_transport_header(skb),
2434 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002435
2436 seq += len;
2437 skb = skb->next;
Arnaldo Carvalho de Meloaa8223c2007-04-10 21:04:22 -07002438 th = tcp_hdr(skb);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002439
2440 th->seq = htonl(seq);
2441 th->cwr = 0;
2442 } while (skb->next);
2443
Arnaldo Carvalho de Melo27a884d2007-04-19 20:29:13 -07002444 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002445 skb->data_len);
Al Virod3bc23e2006-11-14 21:24:49 -08002446 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2447 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002448 if (skb->ip_summed != CHECKSUM_PARTIAL)
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002449 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2450 thlen, skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002451
2452out:
2453 return segs;
2454}
Herbert Xuadcfc7d2006-06-30 13:36:15 -07002455EXPORT_SYMBOL(tcp_tso_segment);
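
/*
 * The th->check manipulation in tcp_tso_segment() is the standard
 * incremental one's-complement update (cf. RFC 1624): unfold the old
 * checksum, add the delta for the changed length field, and refold the
 * carries. A hedged standalone sketch of the same arithmetic:
 */
#if 0	/* illustration only */
static unsigned short csum_adjust(unsigned short check, unsigned int delta)
{
	unsigned int acc = (unsigned short)~check;	/* back to raw sum */

	acc += delta;
	while (acc >> 16)		/* fold 32-bit carries into 16 bits */
		acc = (acc & 0xffff) + (acc >> 16);
	return (unsigned short)~acc;
}
#endif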
Herbert Xuf4c50d92006-06-22 03:02:40 -07002456
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002457#ifdef CONFIG_TCP_MD5SIG
2458static unsigned long tcp_md5sig_users;
2459static struct tcp_md5sig_pool **tcp_md5sig_pool;
2460static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2461
YOSHIFUJI Hideaki8d26d762008-04-17 13:19:16 +09002462int tcp_calc_md5_hash(char *md5_hash, struct tcp_md5sig_key *key,
2463 int bplen,
2464 struct tcphdr *th, unsigned int tcplen,
2465 struct tcp_md5sig_pool *hp)
2466{
2467 struct scatterlist sg[4];
2468 __u16 data_len;
2469 int block = 0;
2470 __sum16 cksum;
2471 struct hash_desc *desc = &hp->md5_desc;
2472 int err;
2473 unsigned int nbytes = 0;
2474
2475 sg_init_table(sg, 4);
2476
2477 /* 1. The TCP pseudo-header */
2478 sg_set_buf(&sg[block++], &hp->md5_blk, bplen);
2479 nbytes += bplen;
2480
2481 /* 2. The TCP header, excluding options, and assuming a
2482 * checksum of zero
2483 */
2484 cksum = th->check;
2485 th->check = 0;
2486 sg_set_buf(&sg[block++], th, sizeof(*th));
2487 nbytes += sizeof(*th);
2488
2489 /* 3. The TCP segment data (if any) */
2490 data_len = tcplen - (th->doff << 2);
2491 if (data_len > 0) {
2492 u8 *data = (u8 *)th + (th->doff << 2);
2493 sg_set_buf(&sg[block++], data, data_len);
2494 nbytes += data_len;
2495 }
2496
2497 /* 4. an independently-specified key or password, known to both
2498 * TCPs and presumably connection-specific
2499 */
2500 sg_set_buf(&sg[block++], key->key, key->keylen);
2501 nbytes += key->keylen;
2502
2503 sg_mark_end(&sg[block - 1]);
2504
2505 /* Now store the hash into the packet */
2506 err = crypto_hash_init(desc);
2507 if (err) {
2508 if (net_ratelimit())
2509 printk(KERN_WARNING "%s(): hash_init failed\n", __func__);
2510 return -1;
2511 }
2512 err = crypto_hash_update(desc, sg, nbytes);
2513 if (err) {
2514 if (net_ratelimit())
2515 printk(KERN_WARNING "%s(): hash_update failed\n", __func__);
2516 return -1;
2517 }
2518 err = crypto_hash_final(desc, md5_hash);
2519 if (err) {
2520 if (net_ratelimit())
2521 printk(KERN_WARNING "%s(): hash_final failed\n", __func__);
2522 return -1;
2523 }
2524
2525 /* Reset header */
2526 th->check = cksum;
2527
2528 return 0;
2529}
2530EXPORT_SYMBOL(tcp_calc_md5_hash);
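
/*
 * The key hashed in step 4 above is installed per-peer from userspace
 * with the TCP_MD5SIG socket option (struct tcp_md5sig from
 * <linux/tcp.h>). A hedged sketch for an IPv4 peer:
 */
#if 0	/* userspace illustration */
#include <linux/tcp.h>
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key(int fd, const struct sockaddr_in *peer,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif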
2531
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002532static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2533{
2534 int cpu;
2535 for_each_possible_cpu(cpu) {
2536 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2537 if (p) {
2538 if (p->md5_desc.tfm)
2539 crypto_free_hash(p->md5_desc.tfm);
2540 kfree(p);
2541 p = NULL;
2542 }
2543 }
2544 free_percpu(pool);
2545}
2546
2547void tcp_free_md5sig_pool(void)
2548{
2549 struct tcp_md5sig_pool **pool = NULL;
2550
David S. Miller2c4f6212007-02-20 23:51:47 -08002551 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002552 if (--tcp_md5sig_users == 0) {
2553 pool = tcp_md5sig_pool;
2554 tcp_md5sig_pool = NULL;
2555 }
David S. Miller2c4f6212007-02-20 23:51:47 -08002556 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002557 if (pool)
2558 __tcp_free_md5sig_pool(pool);
2559}
2560
2561EXPORT_SYMBOL(tcp_free_md5sig_pool);
2562
Adrian Bunkf5b99bc2006-11-30 17:22:29 -08002563static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002564{
2565 int cpu;
2566 struct tcp_md5sig_pool **pool;
2567
2568 pool = alloc_percpu(struct tcp_md5sig_pool *);
2569 if (!pool)
2570 return NULL;
2571
2572 for_each_possible_cpu(cpu) {
2573 struct tcp_md5sig_pool *p;
2574 struct crypto_hash *hash;
2575
2576 p = kzalloc(sizeof(*p), GFP_KERNEL);
2577 if (!p)
2578 goto out_free;
2579 *per_cpu_ptr(pool, cpu) = p;
2580
2581 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2582 if (!hash || IS_ERR(hash))
2583 goto out_free;
2584
2585 p->md5_desc.tfm = hash;
2586 }
2587 return pool;
2588out_free:
2589 __tcp_free_md5sig_pool(pool);
2590 return NULL;
2591}
2592
2593struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2594{
2595 struct tcp_md5sig_pool **pool;
2596 int alloc = 0;
2597
2598retry:
David S. Miller2c4f6212007-02-20 23:51:47 -08002599 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002600 pool = tcp_md5sig_pool;
2601 if (tcp_md5sig_users++ == 0) {
2602 alloc = 1;
David S. Miller2c4f6212007-02-20 23:51:47 -08002603 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002604 } else if (!pool) {
2605 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002606 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002607 cpu_relax();
2608 goto retry;
2609 } else
David S. Miller2c4f6212007-02-20 23:51:47 -08002610 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002611
2612 if (alloc) {
2613 /* we cannot hold spinlock here because this may sleep. */
2614 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
David S. Miller2c4f6212007-02-20 23:51:47 -08002615 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002616 if (!p) {
2617 tcp_md5sig_users--;
David S. Miller2c4f6212007-02-20 23:51:47 -08002618 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002619 return NULL;
2620 }
2621 pool = tcp_md5sig_pool;
2622 if (pool) {
2623 /* oops, it has already been assigned. */
David S. Miller2c4f6212007-02-20 23:51:47 -08002624 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002625 __tcp_free_md5sig_pool(p);
2626 } else {
2627 tcp_md5sig_pool = pool = p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002628 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002629 }
2630 }
2631 return pool;
2632}
2633
2634EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2635
2636struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2637{
2638 struct tcp_md5sig_pool **p;
David S. Miller2c4f6212007-02-20 23:51:47 -08002639 spin_lock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002640 p = tcp_md5sig_pool;
2641 if (p)
2642 tcp_md5sig_users++;
David S. Miller2c4f6212007-02-20 23:51:47 -08002643 spin_unlock_bh(&tcp_md5sig_pool_lock);
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002644 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2645}
2646
2647EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2648
David S. Miller6931ba72006-12-13 16:25:44 -08002649void __tcp_put_md5sig_pool(void)
2650{
2651 tcp_free_md5sig_pool();
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002652}
2653
2654EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2655#endif
2656
Andi Kleen4ac02ba2007-04-20 17:11:46 -07002657void tcp_done(struct sock *sk)
2658{
2659	if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2660 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
2661
2662 tcp_set_state(sk, TCP_CLOSE);
2663 tcp_clear_xmit_timers(sk);
2664
2665 sk->sk_shutdown = SHUTDOWN_MASK;
2666
2667 if (!sock_flag(sk, SOCK_DEAD))
2668 sk->sk_state_change(sk);
2669 else
2670 inet_csk_destroy_sock(sk);
2671}
2672EXPORT_SYMBOL_GPL(tcp_done);
2673
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002674extern struct tcp_congestion_ops tcp_reno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
2676static __initdata unsigned long thash_entries;
2677static int __init set_thash_entries(char *str)
2678{
2679 if (!str)
2680 return 0;
2681 thash_entries = simple_strtoul(str, &str, 0);
2682 return 1;
2683}
2684__setup("thash_entries=", set_thash_entries);
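
/*
 * This consumes the "thash_entries=" kernel command line parameter,
 * which overrides the memory-based sizing of the established hash
 * table in tcp_init() below, e.g. booting with:
 *
 *	thash_entries=131072
 */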
2685
2686void __init tcp_init(void)
2687{
2688 struct sk_buff *skb = NULL;
John Heffner7b4f4b52006-03-25 01:34:07 -08002689 unsigned long limit;
2690 int order, i, max_share;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
Pavel Emelyanov1f9e6362007-12-11 02:12:04 -08002692 BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002694 tcp_hashinfo.bind_bucket_cachep =
2695 kmem_cache_create("tcp_bind_bucket",
2696 sizeof(struct inet_bind_bucket), 0,
Paul Mundt20c2df82007-07-20 10:11:58 +09002697 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699 /* Size and allocate the main established and bind bucket
2700 * hash tables.
2701 *
2702 * The methodology is similar to that of the buffer cache.
2703 */
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002704 tcp_hashinfo.ehash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 alloc_large_system_hash("TCP established",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002706 sizeof(struct inet_ehash_bucket),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 thash_entries,
2708 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002709 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002710 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002711 &tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002712 NULL,
Jean Delvare0ccfe612007-10-30 00:59:25 -07002713 thash_entries ? 0 : 512 * 1024);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002714 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2715 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002716 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002717 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 }
Eric Dumazet230140c2007-11-07 02:40:20 -08002719 if (inet_ehash_locks_alloc(&tcp_hashinfo))
2720 panic("TCP: failed to alloc ehash_locks");
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002721 tcp_hashinfo.bhash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 alloc_large_system_hash("TCP bind",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002723 sizeof(struct inet_bind_hashbucket),
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002724 tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002726 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002727 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002728 &tcp_hashinfo.bhash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729 NULL,
2730 64 * 1024);
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002731 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2732 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2733 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2734 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 }
2736
2737 /* Try to be a bit smarter and adjust defaults depending
2738 * on available memory.
2739 */
2740 for (order = 0; ((1 << order) << PAGE_SHIFT) <
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002741 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 order++)
2743 ;
Andi Kleene7626482005-06-13 14:24:52 -07002744 if (order >= 4) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002745 tcp_death_row.sysctl_max_tw_buckets = 180000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 sysctl_tcp_max_orphans = 4096 << (order - 4);
2747 sysctl_max_syn_backlog = 1024;
2748 } else if (order < 3) {
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002749 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 sysctl_tcp_max_orphans >>= (3 - order);
2751 sysctl_max_syn_backlog = 128;
2752 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753
John Heffner53cdcc02007-03-16 15:04:03 -07002754 /* Set the pressure threshold to be a fraction of global memory that
2755 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2756 * memory, with a floor of 128 pages.
2757 */
2758 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2759 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2760 limit = max(limit, 128UL);
2761 sysctl_tcp_mem[0] = limit / 4 * 3;
2762 sysctl_tcp_mem[1] = limit;
John Heffner52bf3762006-11-14 20:25:17 -08002763 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
John Heffner53cdcc02007-03-16 15:04:03 -07002765 /* Set per-socket limits to no more than 1/128 the pressure threshold */
John Heffner7b4f4b52006-03-25 01:34:07 -08002766 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2767 max_share = min(4UL*1024*1024, limit);
2768
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002769 sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
John Heffner7b4f4b52006-03-25 01:34:07 -08002770 sysctl_tcp_wmem[1] = 16*1024;
2771 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2772
Hideo Aoki3ab224b2007-12-31 00:11:19 -08002773 sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
John Heffner7b4f4b52006-03-25 01:34:07 -08002774 sysctl_tcp_rmem[1] = 87380;
2775 sysctl_tcp_rmem[2] = max(87380, max_share);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
2777 printk(KERN_INFO "TCP: Hash tables configured "
2778 "(established %d bind %d)\n",
Eric Dumazetdbca9b2752007-02-08 14:16:46 -08002779 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002780
2781 tcp_register_congestion_control(&tcp_reno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782}
2783
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784EXPORT_SYMBOL(tcp_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785EXPORT_SYMBOL(tcp_disconnect);
2786EXPORT_SYMBOL(tcp_getsockopt);
2787EXPORT_SYMBOL(tcp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788EXPORT_SYMBOL(tcp_poll);
2789EXPORT_SYMBOL(tcp_read_sock);
2790EXPORT_SYMBOL(tcp_recvmsg);
2791EXPORT_SYMBOL(tcp_sendmsg);
Jens Axboe9c55e012007-11-06 23:30:13 -08002792EXPORT_SYMBOL(tcp_splice_read);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793EXPORT_SYMBOL(tcp_sendpage);
2794EXPORT_SYMBOL(tcp_setsockopt);
2795EXPORT_SYMBOL(tcp_shutdown);
2796EXPORT_SYMBOL(tcp_statistics);