1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * Implementation of the Transmission Control Protocol(TCP).
7 *
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
9 *
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
21 *
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed where wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
209 *
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
214 *
215 * Description of States:
216 *
217 * TCP_SYN_SENT sent a connection request, waiting for ack
218 *
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
221 *
222 * TCP_ESTABLISHED connection established
223 *
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
226 *
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
229 *
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
232 *
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
238 *
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
242 *
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
246 *
247 * TCP_CLOSE socket is finished
248 */
249
250#include <linux/module.h>
251#include <linux/types.h>
252#include <linux/fcntl.h>
253#include <linux/poll.h>
254#include <linux/init.h>
255#include <linux/smp_lock.h>
256#include <linux/fs.h>
257#include <linux/random.h>
258#include <linux/bootmem.h>
259#include <linux/cache.h>
260#include <linux/err.h>
261#include <linux/crypto.h>
262
263#include <net/icmp.h>
264#include <net/tcp.h>
265#include <net/xfrm.h>
266#include <net/ip.h>
267#include <net/netdma.h>
268
269#include <asm/uaccess.h>
270#include <asm/ioctls.h>
271
272int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
273
274DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
275
276atomic_t tcp_orphan_count = ATOMIC_INIT(0);
277
278EXPORT_SYMBOL_GPL(tcp_orphan_count);
279
280int sysctl_tcp_mem[3] __read_mostly;
281int sysctl_tcp_wmem[3] __read_mostly;
282int sysctl_tcp_rmem[3] __read_mostly;
283
284EXPORT_SYMBOL(sysctl_tcp_mem);
285EXPORT_SYMBOL(sysctl_tcp_rmem);
286EXPORT_SYMBOL(sysctl_tcp_wmem);
287
288atomic_t tcp_memory_allocated; /* Current allocated memory. */
289atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
290
291EXPORT_SYMBOL(tcp_memory_allocated);
292EXPORT_SYMBOL(tcp_sockets_allocated);
293
294/*
295 * Pressure flag: try to collapse.
296 * Technical note: it is used by multiple contexts non atomically.
297 * All the sk_stream_mem_schedule() is of this nature: accounting
298 * is strict, actions are advisory and have some latency.
299 */
300int tcp_memory_pressure;
301
302EXPORT_SYMBOL(tcp_memory_pressure);
303
304void tcp_enter_memory_pressure(void)
305{
306 if (!tcp_memory_pressure) {
307 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
308 tcp_memory_pressure = 1;
309 }
310}
311
312EXPORT_SYMBOL(tcp_enter_memory_pressure);
313
314/*
315 * Wait for a TCP event.
316 *
317 * Note that we don't need to lock the socket, as the upper poll layers
318 * take care of normal races (between the test and the event) and we don't
319 * go look at any of the socket buffers directly.
320 */
321unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
322{
323 unsigned int mask;
324 struct sock *sk = sock->sk;
325 struct tcp_sock *tp = tcp_sk(sk);
326
327 poll_wait(file, sk->sk_sleep, wait);
328 if (sk->sk_state == TCP_LISTEN)
329 return inet_csk_listen_poll(sk);
330
331 /* Socket is not locked. We are protected from async events
332 by poll logic and correct handling of state changes
333 made by other threads is impossible in any case.
334 */
335
336 mask = 0;
337 if (sk->sk_err)
338 mask = POLLERR;
339
340 /*
341 * POLLHUP is certainly not done right. But poll() doesn't
342 * have a notion of HUP in just one direction, and for a
343 * socket the read side is more interesting.
344 *
345 * Some poll() documentation says that POLLHUP is incompatible
346 * with the POLLOUT/POLLWR flags, so somebody should check this
347 * all. But careful, it tends to be safer to return too many
348 * bits than too few, and you can easily break real applications
349 * if you don't tell them that something has hung up!
350 *
351 * Check-me.
352 *
353 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
354 * our fs/select.c). It means that after we received EOF,
355 * poll always returns immediately, making impossible poll() on write()
356 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
357 * if and only if shutdown has been made in both directions.
358 * Actually, it is interesting to look how Solaris and DUX
359 * solve this dilemma. I would prefer, if POLLHUP were maskable,
360 * then we could set it on SND_SHUTDOWN. BTW examples given
361 * in Stevens' books assume exactly this behaviour, it explains
362 * why POLLHUP is incompatible with POLLOUT. --ANK
363 *
364 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
365 * blocking on fresh not-connected or disconnected socket. --ANK
366 */
367 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
368 mask |= POLLHUP;
369 if (sk->sk_shutdown & RCV_SHUTDOWN)
370 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
371
372 /* Connected? */
373 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
374 /* Potential race condition. If read of tp below will
375 * escape above sk->sk_state, we can be illegally awakened
376 * in SYN_* states. */
377 if ((tp->rcv_nxt != tp->copied_seq) &&
378 (tp->urg_seq != tp->copied_seq ||
379 tp->rcv_nxt != tp->copied_seq + 1 ||
380 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
381 mask |= POLLIN | POLLRDNORM;
382
383 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
384 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
385 mask |= POLLOUT | POLLWRNORM;
386 } else { /* send SIGIO later */
387 set_bit(SOCK_ASYNC_NOSPACE,
388 &sk->sk_socket->flags);
389 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
390
391 /* Race breaker. If space is freed after
392 * wspace test but before the flags are set,
393 * IO signal will be lost.
394 */
395 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
396 mask |= POLLOUT | POLLWRNORM;
397 }
398 }
399
400 if (tp->urg_data & TCP_URG_VALID)
401 mask |= POLLPRI;
402 }
403 return mask;
404}
405
406int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
407{
408 struct tcp_sock *tp = tcp_sk(sk);
409 int answ;
410
411 switch (cmd) {
412 case SIOCINQ:
413 if (sk->sk_state == TCP_LISTEN)
414 return -EINVAL;
415
416 lock_sock(sk);
417 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
418 answ = 0;
419 else if (sock_flag(sk, SOCK_URGINLINE) ||
420 !tp->urg_data ||
421 before(tp->urg_seq, tp->copied_seq) ||
422 !before(tp->urg_seq, tp->rcv_nxt)) {
423 answ = tp->rcv_nxt - tp->copied_seq;
424
425 /* Subtract 1, if FIN is in queue. */
426 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
427 answ -=
428 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
429 } else
430 answ = tp->urg_seq - tp->copied_seq;
431 release_sock(sk);
432 break;
433 case SIOCATMARK:
434 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
435 break;
436 case SIOCOUTQ:
437 if (sk->sk_state == TCP_LISTEN)
438 return -EINVAL;
439
440 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
441 answ = 0;
442 else
443 answ = tp->write_seq - tp->snd_una;
444 break;
445 default:
446 return -ENOIOCTLCMD;
447 };
448
449 return put_user(answ, (int __user *)arg);
450}
451
452static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
453{
454 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
455 tp->pushed_seq = tp->write_seq;
456}
457
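/* Force a PSH once more than half of the largest window the peer has ever
 * advertised (tp->max_window) has been queued since the last pushed byte.
 */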
458static inline int forced_push(struct tcp_sock *tp)
459{
460 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
461}
462
463static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
464 struct sk_buff *skb)
465{
466 skb->csum = 0;
467 TCP_SKB_CB(skb)->seq = tp->write_seq;
468 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
469 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
470 TCP_SKB_CB(skb)->sacked = 0;
471 skb_header_release(skb);
472 __skb_queue_tail(&sk->sk_write_queue, skb);
473 sk_charge_skb(sk, skb);
474 if (!sk->sk_send_head)
475 sk->sk_send_head = skb;
476 if (tp->nonagle & TCP_NAGLE_PUSH)
477 tp->nonagle &= ~TCP_NAGLE_PUSH;
478}
479
480static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
481 struct sk_buff *skb)
482{
483 if (flags & MSG_OOB) {
484 tp->urg_mode = 1;
485 tp->snd_up = tp->write_seq;
486 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
487 }
488}
489
490static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
491 int mss_now, int nonagle)
492{
493 if (sk->sk_send_head) {
494 struct sk_buff *skb = sk->sk_write_queue.prev;
495 if (!(flags & MSG_MORE) || forced_push(tp))
496 tcp_mark_push(tp, skb);
497 tcp_mark_urg(tp, flags, skb);
498 __tcp_push_pending_frames(sk, tp, mss_now,
499 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
500 }
501}
502
503static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
504 size_t psize, int flags)
505{
506 struct tcp_sock *tp = tcp_sk(sk);
507 int mss_now, size_goal;
508 int err;
509 ssize_t copied;
510 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
511
512 /* Wait for a connection to finish. */
513 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
514 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
515 goto out_err;
516
517 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
518
519 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
520 size_goal = tp->xmit_size_goal;
521 copied = 0;
522
523 err = -EPIPE;
524 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
525 goto do_error;
526
527 while (psize > 0) {
528 struct sk_buff *skb = sk->sk_write_queue.prev;
529 struct page *page = pages[poffset / PAGE_SIZE];
530 int copy, i, can_coalesce;
531 int offset = poffset % PAGE_SIZE;
532 int size = min_t(size_t, psize, PAGE_SIZE - offset);
533
534 if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
535new_segment:
536 if (!sk_stream_memory_free(sk))
537 goto wait_for_sndbuf;
538
539 skb = sk_stream_alloc_pskb(sk, 0, 0,
540 sk->sk_allocation);
541 if (!skb)
542 goto wait_for_memory;
543
544 skb_entail(sk, tp, skb);
545 copy = size_goal;
546 }
547
548 if (copy > size)
549 copy = size;
550
551 i = skb_shinfo(skb)->nr_frags;
552 can_coalesce = skb_can_coalesce(skb, i, page, offset);
553 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
554 tcp_mark_push(tp, skb);
555 goto new_segment;
556 }
557 if (!sk_stream_wmem_schedule(sk, copy))
558 goto wait_for_memory;
559
560 if (can_coalesce) {
561 skb_shinfo(skb)->frags[i - 1].size += copy;
562 } else {
563 get_page(page);
564 skb_fill_page_desc(skb, i, page, offset, copy);
565 }
566
567 skb->len += copy;
568 skb->data_len += copy;
569 skb->truesize += copy;
570 sk->sk_wmem_queued += copy;
571 sk->sk_forward_alloc -= copy;
572 skb->ip_summed = CHECKSUM_PARTIAL;
573 tp->write_seq += copy;
574 TCP_SKB_CB(skb)->end_seq += copy;
575 skb_shinfo(skb)->gso_segs = 0;
576
577 if (!copied)
578 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
579
580 copied += copy;
581 poffset += copy;
582 if (!(psize -= copy))
583 goto out;
584
585 if (skb->len < mss_now || (flags & MSG_OOB))
586 continue;
587
588 if (forced_push(tp)) {
589 tcp_mark_push(tp, skb);
590 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
591 } else if (skb == sk->sk_send_head)
592 tcp_push_one(sk, mss_now);
593 continue;
594
595wait_for_sndbuf:
596 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
597wait_for_memory:
598 if (copied)
599 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
600
601 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
602 goto do_error;
603
604 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
605 size_goal = tp->xmit_size_goal;
606 }
607
608out:
609 if (copied)
610 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
611 return copied;
612
613do_error:
614 if (copied)
615 goto out;
616out_err:
617 return sk_stream_error(sk, flags, err);
618}
619
620ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
621 size_t size, int flags)
622{
623 ssize_t res;
624 struct sock *sk = sock->sk;
625
626 if (!(sk->sk_route_caps & NETIF_F_SG) ||
627 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
628 return sock_no_sendpage(sock, page, offset, size, flags);
629
630 lock_sock(sk);
631 TCP_CHECK_TIMER(sk);
632 res = do_tcp_sendpages(sk, &page, offset, size, flags);
633 TCP_CHECK_TIMER(sk);
634 release_sock(sk);
635 return res;
636}
637
638#define TCP_PAGE(sk) (sk->sk_sndmsg_page)
639#define TCP_OFF(sk) (sk->sk_sndmsg_off)
640
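/* Pick how much linear (non-paged) space to request for a fresh skb in
 * tcp_sendmsg(): zero for GSO-capable scatter-gather devices, so payload
 * lands entirely in page fragments, otherwise roughly one cached MSS,
 * trimmed back to the head-size boundary when it would straddle it.
 */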
641static inline int select_size(struct sock *sk, struct tcp_sock *tp)
642{
643 int tmp = tp->mss_cache;
644
645 if (sk->sk_route_caps & NETIF_F_SG) {
646 if (sk_can_gso(sk))
647 tmp = 0;
648 else {
649 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
650
651 if (tmp >= pgbreak &&
652 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
653 tmp = pgbreak;
654 }
655 }
656
657 return tmp;
658}
659
660int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
661 size_t size)
662{
663 struct iovec *iov;
664 struct tcp_sock *tp = tcp_sk(sk);
665 struct sk_buff *skb;
666 int iovlen, flags;
667 int mss_now, size_goal;
668 int err, copied;
669 long timeo;
670
671 lock_sock(sk);
672 TCP_CHECK_TIMER(sk);
673
674 flags = msg->msg_flags;
675 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
676
677 /* Wait for a connection to finish. */
678 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
679 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
680 goto out_err;
681
682 /* This should be in poll */
683 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
684
685 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
686 size_goal = tp->xmit_size_goal;
687
688 /* Ok commence sending. */
689 iovlen = msg->msg_iovlen;
690 iov = msg->msg_iov;
691 copied = 0;
692
693 err = -EPIPE;
694 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
695 goto do_error;
696
697 while (--iovlen >= 0) {
698 int seglen = iov->iov_len;
699 unsigned char __user *from = iov->iov_base;
700
701 iov++;
702
703 while (seglen > 0) {
704 int copy;
705
706 skb = sk->sk_write_queue.prev;
707
708 if (!sk->sk_send_head ||
709 (copy = size_goal - skb->len) <= 0) {
710
711new_segment:
712 /* Allocate new segment. If the interface is SG,
713 * allocate skb fitting to single page.
714 */
715 if (!sk_stream_memory_free(sk))
716 goto wait_for_sndbuf;
717
718 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
719 0, sk->sk_allocation);
720 if (!skb)
721 goto wait_for_memory;
722
723 /*
724 * Check whether we can use HW checksum.
725 */
726 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
727 skb->ip_summed = CHECKSUM_PARTIAL;
728
729 skb_entail(sk, tp, skb);
730 copy = size_goal;
731 }
732
733 /* Try to append data to the end of skb. */
734 if (copy > seglen)
735 copy = seglen;
736
737 /* Where to copy to? */
738 if (skb_tailroom(skb) > 0) {
739 /* We have some space in skb head. Superb! */
740 if (copy > skb_tailroom(skb))
741 copy = skb_tailroom(skb);
742 if ((err = skb_add_data(skb, from, copy)) != 0)
743 goto do_fault;
744 } else {
745 int merge = 0;
746 int i = skb_shinfo(skb)->nr_frags;
747 struct page *page = TCP_PAGE(sk);
748 int off = TCP_OFF(sk);
749
750 if (skb_can_coalesce(skb, i, page, off) &&
751 off != PAGE_SIZE) {
752 /* We can extend the last page
753 * fragment. */
754 merge = 1;
755 } else if (i == MAX_SKB_FRAGS ||
756 (!i &&
757 !(sk->sk_route_caps & NETIF_F_SG))) {
758 /* Need to add new fragment and cannot
759 * do this because interface is non-SG,
760 * or because all the page slots are
761 * busy. */
762 tcp_mark_push(tp, skb);
763 goto new_segment;
764 } else if (page) {
765 if (off == PAGE_SIZE) {
766 put_page(page);
767 TCP_PAGE(sk) = page = NULL;
768 off = 0;
769 }
770 } else
771 off = 0;
772
773 if (copy > PAGE_SIZE - off)
774 copy = PAGE_SIZE - off;
775
776 if (!sk_stream_wmem_schedule(sk, copy))
777 goto wait_for_memory;
778
779 if (!page) {
780 /* Allocate new cache page. */
781 if (!(page = sk_stream_alloc_page(sk)))
782 goto wait_for_memory;
783 }
784
785 /* Time to copy data. We are close to
786 * the end! */
787 err = skb_copy_to_page(sk, from, skb, page,
788 off, copy);
789 if (err) {
790 /* If this page was new, give it to the
791 * socket so it does not get leaked.
792 */
793 if (!TCP_PAGE(sk)) {
794 TCP_PAGE(sk) = page;
795 TCP_OFF(sk) = 0;
796 }
797 goto do_error;
798 }
799
800 /* Update the skb. */
801 if (merge) {
802 skb_shinfo(skb)->frags[i - 1].size +=
803 copy;
804 } else {
805 skb_fill_page_desc(skb, i, page, off, copy);
806 if (TCP_PAGE(sk)) {
807 get_page(page);
808 } else if (off + copy < PAGE_SIZE) {
809 get_page(page);
810 TCP_PAGE(sk) = page;
811 }
812 }
813
814 TCP_OFF(sk) = off + copy;
815 }
816
817 if (!copied)
818 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
819
820 tp->write_seq += copy;
821 TCP_SKB_CB(skb)->end_seq += copy;
822 skb_shinfo(skb)->gso_segs = 0;
823
824 from += copy;
825 copied += copy;
826 if ((seglen -= copy) == 0 && iovlen == 0)
827 goto out;
828
829 if (skb->len < mss_now || (flags & MSG_OOB))
830 continue;
831
832 if (forced_push(tp)) {
833 tcp_mark_push(tp, skb);
834 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
835 } else if (skb == sk->sk_send_head)
836 tcp_push_one(sk, mss_now);
837 continue;
838
839wait_for_sndbuf:
840 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
841wait_for_memory:
842 if (copied)
843 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
844
845 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
846 goto do_error;
847
848 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
849 size_goal = tp->xmit_size_goal;
850 }
851 }
852
853out:
854 if (copied)
855 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
856 TCP_CHECK_TIMER(sk);
857 release_sock(sk);
858 return copied;
859
860do_fault:
861 if (!skb->len) {
862 if (sk->sk_send_head == skb)
863 sk->sk_send_head = NULL;
864 __skb_unlink(skb, &sk->sk_write_queue);
865 sk_stream_free_skb(sk, skb);
866 }
867
868do_error:
869 if (copied)
870 goto out;
871out_err:
872 err = sk_stream_error(sk, flags, err);
873 TCP_CHECK_TIMER(sk);
874 release_sock(sk);
875 return err;
876}
877
878/*
879 * Handle reading urgent data. BSD has very simple semantics for
880 * this, no blocking and very strange errors 8)
881 */
882
883static int tcp_recv_urg(struct sock *sk, long timeo,
884 struct msghdr *msg, int len, int flags,
885 int *addr_len)
886{
887 struct tcp_sock *tp = tcp_sk(sk);
888
889 /* No URG data to read. */
890 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
891 tp->urg_data == TCP_URG_READ)
892 return -EINVAL; /* Yes this is right ! */
893
894 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
895 return -ENOTCONN;
896
897 if (tp->urg_data & TCP_URG_VALID) {
898 int err = 0;
899 char c = tp->urg_data;
900
901 if (!(flags & MSG_PEEK))
902 tp->urg_data = TCP_URG_READ;
903
904 /* Read urgent data. */
905 msg->msg_flags |= MSG_OOB;
906
907 if (len > 0) {
908 if (!(flags & MSG_TRUNC))
909 err = memcpy_toiovec(msg->msg_iov, &c, 1);
910 len = 1;
911 } else
912 msg->msg_flags |= MSG_TRUNC;
913
914 return err ? -EFAULT : len;
915 }
916
917 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
918 return 0;
919
920 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
921 * the available implementations agree in this case:
922 * this call should never block, independent of the
923 * blocking state of the socket.
924 * Mike <pall@rz.uni-karlsruhe.de>
925 */
926 return -EAGAIN;
927}
928
929/* Clean up the receive buffer for full frames taken by the user,
930 * then send an ACK if necessary. COPIED is the number of bytes
931 * tcp_recvmsg has given to the user so far, it speeds up the
932 * calculation of whether or not we must ACK for the sake of
933 * a window update.
934 */
935void tcp_cleanup_rbuf(struct sock *sk, int copied)
936{
937 struct tcp_sock *tp = tcp_sk(sk);
938 int time_to_ack = 0;
939
940#if TCP_DEBUG
941 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
942
943 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
944#endif
945
946 if (inet_csk_ack_scheduled(sk)) {
947 const struct inet_connection_sock *icsk = inet_csk(sk);
948 /* Delayed ACKs frequently hit locked sockets during bulk
949 * receive. */
950 if (icsk->icsk_ack.blocked ||
951 /* Once-per-two-segments ACK was not sent by tcp_input.c */
952 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
953 /*
954 * If this read emptied read buffer, we send ACK, if
955 * connection is not bidirectional, user drained
956 * receive buffer and there was a small segment
957 * in queue.
958 */
959 (copied > 0 &&
960 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
961 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
962 !icsk->icsk_ack.pingpong)) &&
963 !atomic_read(&sk->sk_rmem_alloc)))
964 time_to_ack = 1;
965 }
966
967 /* We send an ACK if we can now advertise a non-zero window
968 * which has been raised "significantly".
969 *
970 * Even if window raised up to infinity, do not send window open ACK
971 * in states, where we will not receive more. It is useless.
972 */
973 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
974 __u32 rcv_window_now = tcp_receive_window(tp);
975
976 /* Optimize, __tcp_select_window() is not cheap. */
977 if (2*rcv_window_now <= tp->window_clamp) {
978 __u32 new_window = __tcp_select_window(sk);
979
980 /* Send ACK now, if this read freed lots of space
981 * in our buffer. Certainly, new_window is new window.
982 * We can advertise it now, if it is not less than current one.
983 * "Lots" means "at least twice" here.
984 */
985 if (new_window && new_window >= 2 * rcv_window_now)
986 time_to_ack = 1;
987 }
988 }
989 if (time_to_ack)
990 tcp_send_ack(sk);
991}
992
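/* Drain the prequeue: feed every deferred segment to the regular backlog
 * receive handler with BHs disabled, then reset the prequeue memory count.
 */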
993static void tcp_prequeue_process(struct sock *sk)
994{
995 struct sk_buff *skb;
996 struct tcp_sock *tp = tcp_sk(sk);
997
998 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
999
1000 /* RX process wants to run with disabled BHs, though it is not
1001 * necessary */
1002 local_bh_disable();
1003 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1004 sk->sk_backlog_rcv(sk, skb);
1005 local_bh_enable();
1006
1007 /* Clear memory counter. */
1008 tp->ucopy.memory = 0;
1009}
1010
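/* Locate the skb in the receive queue holding sequence number 'seq' and
 * return the offset of that byte inside it; a SYN occupying sequence space
 * is skipped, and an skb carrying a FIN is returned even when seq points
 * past its payload.
 */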
1011static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1012{
1013 struct sk_buff *skb;
1014 u32 offset;
1015
1016 skb_queue_walk(&sk->sk_receive_queue, skb) {
1017 offset = seq - TCP_SKB_CB(skb)->seq;
1018 if (skb->h.th->syn)
1019 offset--;
1020 if (offset < skb->len || skb->h.th->fin) {
1021 *off = offset;
1022 return skb;
1023 }
1024 }
1025 return NULL;
1026}
1027
1028/*
1029 * This routine provides an alternative to tcp_recvmsg() for routines
1030 * that would like to handle copying from skbuffs directly in 'sendfile'
1031 * fashion.
1032 * Note:
1033 * - It is assumed that the socket was locked by the caller.
1034 * - The routine does not block.
1035 * - At present, there is no support for reading OOB data
1036 * or for 'peeking' the socket using this routine
1037 * (although both would be easy to implement).
1038 */
1039int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1040 sk_read_actor_t recv_actor)
1041{
1042 struct sk_buff *skb;
1043 struct tcp_sock *tp = tcp_sk(sk);
1044 u32 seq = tp->copied_seq;
1045 u32 offset;
1046 int copied = 0;
1047
1048 if (sk->sk_state == TCP_LISTEN)
1049 return -ENOTCONN;
1050 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1051 if (offset < skb->len) {
1052 size_t used, len;
1053
1054 len = skb->len - offset;
1055 /* Stop reading if we hit a patch of urgent data */
1056 if (tp->urg_data) {
1057 u32 urg_offset = tp->urg_seq - seq;
1058 if (urg_offset < len)
1059 len = urg_offset;
1060 if (!len)
1061 break;
1062 }
1063 used = recv_actor(desc, skb, offset, len);
1064 if (used <= len) {
1065 seq += used;
1066 copied += used;
1067 offset += used;
1068 }
1069 if (offset != skb->len)
1070 break;
1071 }
1072 if (skb->h.th->fin) {
1073 sk_eat_skb(sk, skb, 0);
1074 ++seq;
1075 break;
1076 }
1077 sk_eat_skb(sk, skb, 0);
1078 if (!desc->count)
1079 break;
1080 }
1081 tp->copied_seq = seq;
1082
1083 tcp_rcv_space_adjust(sk);
1084
1085 /* Clean up data we have read: This will do ACK frames. */
1086 if (copied)
1087 tcp_cleanup_rbuf(sk, copied);
1088 return copied;
1089}
1090
1091/*
1092 * This routine copies from a sock struct into the user buffer.
1093 *
1094 * Technical note: in 2.3 we work on _locked_ socket, so that
1095 * tricks with *seq access order and skb->users are not required.
1096 * Probably, code can be easily improved even more.
1097 */
1098
1099int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1100 size_t len, int nonblock, int flags, int *addr_len)
1101{
1102 struct tcp_sock *tp = tcp_sk(sk);
1103 int copied = 0;
1104 u32 peek_seq;
1105 u32 *seq;
1106 unsigned long used;
1107 int err;
1108 int target; /* Read at least this many bytes */
1109 long timeo;
1110 struct task_struct *user_recv = NULL;
1111 int copied_early = 0;
1112
1113 lock_sock(sk);
1114
1115 TCP_CHECK_TIMER(sk);
1116
1117 err = -ENOTCONN;
1118 if (sk->sk_state == TCP_LISTEN)
1119 goto out;
1120
1121 timeo = sock_rcvtimeo(sk, nonblock);
1122
1123 /* Urgent data needs to be handled specially. */
1124 if (flags & MSG_OOB)
1125 goto recv_urg;
1126
1127 seq = &tp->copied_seq;
1128 if (flags & MSG_PEEK) {
1129 peek_seq = tp->copied_seq;
1130 seq = &peek_seq;
1131 }
1132
1133 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1134
1135#ifdef CONFIG_NET_DMA
1136 tp->ucopy.dma_chan = NULL;
1137 preempt_disable();
1138 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1139 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
1140 preempt_enable_no_resched();
1141 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1142 } else
1143 preempt_enable_no_resched();
1144#endif
1145
1146 do {
1147 struct sk_buff *skb;
1148 u32 offset;
1149
1150 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1151 if (tp->urg_data && tp->urg_seq == *seq) {
1152 if (copied)
1153 break;
1154 if (signal_pending(current)) {
1155 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1156 break;
1157 }
1158 }
1159
1160 /* Next get a buffer. */
1161
1162 skb = skb_peek(&sk->sk_receive_queue);
1163 do {
1164 if (!skb)
1165 break;
1166
1167 /* Now that we have two receive queues this
1168 * shouldn't happen.
1169 */
1170 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1171 printk(KERN_INFO "recvmsg bug: copied %X "
1172 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1173 break;
1174 }
1175 offset = *seq - TCP_SKB_CB(skb)->seq;
1176 if (skb->h.th->syn)
1177 offset--;
1178 if (offset < skb->len)
1179 goto found_ok_skb;
1180 if (skb->h.th->fin)
1181 goto found_fin_ok;
1182 BUG_TRAP(flags & MSG_PEEK);
1183 skb = skb->next;
1184 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1185
1186 /* Well, if we have backlog, try to process it now yet. */
1187
1188 if (copied >= target && !sk->sk_backlog.tail)
1189 break;
1190
1191 if (copied) {
1192 if (sk->sk_err ||
1193 sk->sk_state == TCP_CLOSE ||
1194 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1195 !timeo ||
1196 signal_pending(current) ||
1197 (flags & MSG_PEEK))
1198 break;
1199 } else {
1200 if (sock_flag(sk, SOCK_DONE))
1201 break;
1202
1203 if (sk->sk_err) {
1204 copied = sock_error(sk);
1205 break;
1206 }
1207
1208 if (sk->sk_shutdown & RCV_SHUTDOWN)
1209 break;
1210
1211 if (sk->sk_state == TCP_CLOSE) {
1212 if (!sock_flag(sk, SOCK_DONE)) {
1213 /* This occurs when user tries to read
1214 * from never connected socket.
1215 */
1216 copied = -ENOTCONN;
1217 break;
1218 }
1219 break;
1220 }
1221
1222 if (!timeo) {
1223 copied = -EAGAIN;
1224 break;
1225 }
1226
1227 if (signal_pending(current)) {
1228 copied = sock_intr_errno(timeo);
1229 break;
1230 }
1231 }
1232
1233 tcp_cleanup_rbuf(sk, copied);
1234
1235 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1236 /* Install new reader */
1237 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1238 user_recv = current;
1239 tp->ucopy.task = user_recv;
1240 tp->ucopy.iov = msg->msg_iov;
1241 }
1242
1243 tp->ucopy.len = len;
1244
1245 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1246 (flags & (MSG_PEEK | MSG_TRUNC)));
1247
1248 /* Ugly... If prequeue is not empty, we have to
1249 * process it before releasing socket, otherwise
1250 * order will be broken at second iteration.
1251 * More elegant solution is required!!!
1252 *
1253 * Look: we have the following (pseudo)queues:
1254 *
1255 * 1. packets in flight
1256 * 2. backlog
1257 * 3. prequeue
1258 * 4. receive_queue
1259 *
1260 * Each queue can be processed only if the next ones
1261 * are empty. At this point we have empty receive_queue.
1262 * But prequeue _can_ be not empty after 2nd iteration,
1263 * when we jumped to start of loop because backlog
1264 * processing added something to receive_queue.
1265 * We cannot release_sock(), because backlog contains
1266 * packets arrived _after_ prequeued ones.
1267 *
1268 * Shortly, algorithm is clear --- to process all
1269 * the queues in order. We could make it more directly,
1270 * requeueing packets from backlog to prequeue, if
1271 * is not empty. It is more elegant, but eats cycles,
1272 * unfortunately.
1273 */
1274 if (!skb_queue_empty(&tp->ucopy.prequeue))
1275 goto do_prequeue;
1276
1277 /* __ Set realtime policy in scheduler __ */
1278 }
1279
1280 if (copied >= target) {
1281 /* Do not sleep, just process backlog. */
1282 release_sock(sk);
1283 lock_sock(sk);
1284 } else
1285 sk_wait_data(sk, &timeo);
1286
1287#ifdef CONFIG_NET_DMA
1288 tp->ucopy.wakeup = 0;
1289#endif
1290
1291 if (user_recv) {
1292 int chunk;
1293
1294 /* __ Restore normal policy in scheduler __ */
1295
1296 if ((chunk = len - tp->ucopy.len) != 0) {
1297 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1298 len -= chunk;
1299 copied += chunk;
1300 }
1301
1302 if (tp->rcv_nxt == tp->copied_seq &&
1303 !skb_queue_empty(&tp->ucopy.prequeue)) {
1304do_prequeue:
1305 tcp_prequeue_process(sk);
1306
1307 if ((chunk = len - tp->ucopy.len) != 0) {
1308 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1309 len -= chunk;
1310 copied += chunk;
1311 }
1312 }
1313 }
1314 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1315 if (net_ratelimit())
1316 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1317 current->comm, current->pid);
1318 peek_seq = tp->copied_seq;
1319 }
1320 continue;
1321
1322 found_ok_skb:
1323 /* Ok so how much can we use? */
1324 used = skb->len - offset;
1325 if (len < used)
1326 used = len;
1327
1328 /* Do we have urgent data here? */
1329 if (tp->urg_data) {
1330 u32 urg_offset = tp->urg_seq - *seq;
1331 if (urg_offset < used) {
1332 if (!urg_offset) {
1333 if (!sock_flag(sk, SOCK_URGINLINE)) {
1334 ++*seq;
1335 offset++;
1336 used--;
1337 if (!used)
1338 goto skip_copy;
1339 }
1340 } else
1341 used = urg_offset;
1342 }
1343 }
1344
1345 if (!(flags & MSG_TRUNC)) {
1346#ifdef CONFIG_NET_DMA
1347 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1348 tp->ucopy.dma_chan = get_softnet_dma();
1349
1350 if (tp->ucopy.dma_chan) {
1351 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1352 tp->ucopy.dma_chan, skb, offset,
1353 msg->msg_iov, used,
1354 tp->ucopy.pinned_list);
1355
1356 if (tp->ucopy.dma_cookie < 0) {
1357
1358 printk(KERN_ALERT "dma_cookie < 0\n");
1359
1360 /* Exception. Bailout! */
1361 if (!copied)
1362 copied = -EFAULT;
1363 break;
1364 }
1365 if ((offset + used) == skb->len)
1366 copied_early = 1;
1367
1368 } else
1369#endif
1370 {
1371 err = skb_copy_datagram_iovec(skb, offset,
1372 msg->msg_iov, used);
1373 if (err) {
1374 /* Exception. Bailout! */
1375 if (!copied)
1376 copied = -EFAULT;
1377 break;
1378 }
1379 }
1380 }
1381
1382 *seq += used;
1383 copied += used;
1384 len -= used;
1385
1386 tcp_rcv_space_adjust(sk);
1387
1388skip_copy:
1389 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1390 tp->urg_data = 0;
1391 tcp_fast_path_check(sk, tp);
1392 }
1393 if (used + offset < skb->len)
1394 continue;
1395
1396 if (skb->h.th->fin)
1397 goto found_fin_ok;
1398 if (!(flags & MSG_PEEK)) {
1399 sk_eat_skb(sk, skb, copied_early);
1400 copied_early = 0;
1401 }
1402 continue;
1403
1404 found_fin_ok:
1405 /* Process the FIN. */
1406 ++*seq;
1407 if (!(flags & MSG_PEEK)) {
1408 sk_eat_skb(sk, skb, copied_early);
1409 copied_early = 0;
1410 }
1411 break;
1412 } while (len > 0);
1413
1414 if (user_recv) {
1415 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1416 int chunk;
1417
1418 tp->ucopy.len = copied > 0 ? len : 0;
1419
1420 tcp_prequeue_process(sk);
1421
1422 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1423 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1424 len -= chunk;
1425 copied += chunk;
1426 }
1427 }
1428
1429 tp->ucopy.task = NULL;
1430 tp->ucopy.len = 0;
1431 }
1432
1433#ifdef CONFIG_NET_DMA
1434 if (tp->ucopy.dma_chan) {
1435 struct sk_buff *skb;
1436 dma_cookie_t done, used;
1437
1438 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1439
1440 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1441 tp->ucopy.dma_cookie, &done,
1442 &used) == DMA_IN_PROGRESS) {
1443 /* do partial cleanup of sk_async_wait_queue */
1444 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1445 (dma_async_is_complete(skb->dma_cookie, done,
1446 used) == DMA_SUCCESS)) {
1447 __skb_dequeue(&sk->sk_async_wait_queue);
1448 kfree_skb(skb);
1449 }
1450 }
1451
1452 /* Safe to free early-copied skbs now */
1453 __skb_queue_purge(&sk->sk_async_wait_queue);
1454 dma_chan_put(tp->ucopy.dma_chan);
1455 tp->ucopy.dma_chan = NULL;
1456 }
1457 if (tp->ucopy.pinned_list) {
1458 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1459 tp->ucopy.pinned_list = NULL;
1460 }
1461#endif
1462
1463 /* According to UNIX98, msg_name/msg_namelen are ignored
1464 * on connected socket. I was just happy when found this 8) --ANK
1465 */
1466
1467 /* Clean up data we have read: This will do ACK frames. */
1468 tcp_cleanup_rbuf(sk, copied);
1469
1470 TCP_CHECK_TIMER(sk);
1471 release_sock(sk);
1472 return copied;
1473
1474out:
1475 TCP_CHECK_TIMER(sk);
1476 release_sock(sk);
1477 return err;
1478
1479recv_urg:
1480 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1481 goto out;
1482}
1483
1484/*
1485 * State processing on a close. This implements the state shift for
1486 * sending our FIN frame. Note that we only send a FIN for some
1487 * states. A shutdown() may have already sent the FIN, or we may be
1488 * closed.
1489 */
1490
1491static const unsigned char new_state[16] = {
1492 /* current state: new state: action: */
1493 /* (Invalid) */ TCP_CLOSE,
1494 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1495 /* TCP_SYN_SENT */ TCP_CLOSE,
1496 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1497 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1498 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1499 /* TCP_TIME_WAIT */ TCP_CLOSE,
1500 /* TCP_CLOSE */ TCP_CLOSE,
1501 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1502 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1503 /* TCP_LISTEN */ TCP_CLOSE,
1504 /* TCP_CLOSING */ TCP_CLOSING,
1505};
1506
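/* Move the socket to its on-close successor state from new_state[] and
 * report, via the TCP_ACTION_FIN bit, whether the caller still has to
 * send a FIN.
 */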
1507static int tcp_close_state(struct sock *sk)
1508{
1509 int next = (int)new_state[sk->sk_state];
1510 int ns = next & TCP_STATE_MASK;
1511
1512 tcp_set_state(sk, ns);
1513
1514 return next & TCP_ACTION_FIN;
1515}
1516
1517/*
1518 * Shutdown the sending side of a connection. Much like close except
1519 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1520 */
1521
1522void tcp_shutdown(struct sock *sk, int how)
1523{
1524 /* We need to grab some memory, and put together a FIN,
1525 * and then put it into the queue to be sent.
1526 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1527 */
1528 if (!(how & SEND_SHUTDOWN))
1529 return;
1530
1531 /* If we've already sent a FIN, or it's a closed state, skip this. */
1532 if ((1 << sk->sk_state) &
1533 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1534 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1535 /* Clear out any half completed packets. FIN if needed. */
1536 if (tcp_close_state(sk))
1537 tcp_send_fin(sk);
1538 }
1539}
1540
1541void tcp_close(struct sock *sk, long timeout)
1542{
1543 struct sk_buff *skb;
1544 int data_was_unread = 0;
1545 int state;
1546
1547 lock_sock(sk);
1548 sk->sk_shutdown = SHUTDOWN_MASK;
1549
1550 if (sk->sk_state == TCP_LISTEN) {
1551 tcp_set_state(sk, TCP_CLOSE);
1552
1553 /* Special case. */
1554 inet_csk_listen_stop(sk);
1555
1556 goto adjudge_to_death;
1557 }
1558
1559 /* We need to flush the recv. buffs. We do this only on the
1560 * descriptor close, not protocol-sourced closes, because the
1561 * reader process may not have drained the data yet!
1562 */
1563 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1564 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1565 skb->h.th->fin;
1566 data_was_unread += len;
1567 __kfree_skb(skb);
1568 }
1569
1570 sk_stream_mem_reclaim(sk);
1571
1572 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1573 * 3.10, we send a RST here because data was lost. To
1574 * witness the awful effects of the old behavior of always
1575 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1576 * a bulk GET in an FTP client, suspend the process, wait
1577 * for the client to advertise a zero window, then kill -9
1578 * the FTP client, wheee... Note: timeout is always zero
1579 * in such a case.
1580 */
1581 if (data_was_unread) {
1582 /* Unread data was tossed, zap the connection. */
1583 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1584 tcp_set_state(sk, TCP_CLOSE);
1585 tcp_send_active_reset(sk, GFP_KERNEL);
1586 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1587 /* Check zero linger _after_ checking for unread data. */
1588 sk->sk_prot->disconnect(sk, 0);
1589 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1590 } else if (tcp_close_state(sk)) {
1591 /* We FIN if the application ate all the data before
1592 * zapping the connection.
1593 */
1594
1595 /* RED-PEN. Formally speaking, we have broken TCP state
1596 * machine. State transitions:
1597 *
1598 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1599 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1600 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1601 *
1602 * are legal only when FIN has been sent (i.e. in window),
1603 * rather than queued out of window. Purists blame.
1604 *
1605 * F.e. "RFC state" is ESTABLISHED,
1606 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1607 *
1608 * The visible declinations are that sometimes
1609 * we enter time-wait state, when it is not required really
1610 * (harmless), do not send active resets, when they are
1611 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1612 * they look as CLOSING or LAST_ACK for Linux)
1613 * Probably, I missed some more holelets.
1614 * --ANK
1615 */
1616 tcp_send_fin(sk);
1617 }
1618
1619 sk_stream_wait_close(sk, timeout);
1620
1621adjudge_to_death:
1622 state = sk->sk_state;
1623 sock_hold(sk);
1624 sock_orphan(sk);
1625 atomic_inc(sk->sk_prot->orphan_count);
1626
1627 /* It is the last release_sock in its life. It will remove backlog. */
1628 release_sock(sk);
1629
1630
1631 /* Now socket is owned by kernel and we acquire BH lock
1632 to finish close. No need to check for user refs.
1633 */
1634 local_bh_disable();
1635 bh_lock_sock(sk);
1636 BUG_TRAP(!sock_owned_by_user(sk));
1637
1638 /* Have we already been destroyed by a softirq or backlog? */
1639 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1640 goto out;
1641
1642 /* This is a (useful) BSD violation of the RFC. There is a
1643 * problem with TCP as specified in that the other end could
1644 * keep a socket open forever with no application left this end.
1645 * We use a 3 minute timeout (about the same as BSD) then kill
1646 * our end. If they send after that then tough - BUT: long enough
1647 * that we won't make the old 4*rto = almost no time - whoops
1648 * reset mistake.
1649 *
1650 * Nope, it was not mistake. It is really desired behaviour
1651 * f.e. on http servers, when such sockets are useless, but
1652 * consume significant resources. Let's do it with special
1653 * linger2 option. --ANK
1654 */
1655
1656 if (sk->sk_state == TCP_FIN_WAIT2) {
1657 struct tcp_sock *tp = tcp_sk(sk);
1658 if (tp->linger2 < 0) {
1659 tcp_set_state(sk, TCP_CLOSE);
1660 tcp_send_active_reset(sk, GFP_ATOMIC);
1661 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1662 } else {
1663 const int tmo = tcp_fin_time(sk);
1664
1665 if (tmo > TCP_TIMEWAIT_LEN) {
1666 inet_csk_reset_keepalive_timer(sk,
1667 tmo - TCP_TIMEWAIT_LEN);
1668 } else {
1669 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1670 goto out;
1671 }
1672 }
1673 }
1674 if (sk->sk_state != TCP_CLOSE) {
1675 sk_stream_mem_reclaim(sk);
1676 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1677 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1678 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1679 if (net_ratelimit())
1680				printk(KERN_INFO "TCP: too many orphaned "
1681				       "sockets\n");
1682 tcp_set_state(sk, TCP_CLOSE);
1683 tcp_send_active_reset(sk, GFP_ATOMIC);
1684 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1685 }
1686 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001687
1688 if (sk->sk_state == TCP_CLOSE)
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001689 inet_csk_destroy_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690 /* Otherwise, socket is reprieved until protocol close. */
1691
1692out:
1693 bh_unlock_sock(sk);
1694 local_bh_enable();
1695 sock_put(sk);
1696}
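
/*
 * Illustrative sketch, not part of tcp.c: the FIN_WAIT2/linger2 handling
 * above can be tuned per socket from userspace through the TCP_LINGER2
 * option (processed by do_tcp_setsockopt() further down).  A negative
 * value asks for an immediate reset instead of lingering in FIN_WAIT2,
 * a positive value bounds the FIN_WAIT2 lifetime in seconds (effectively
 * capped by the tcp_fin_timeout sysctl).  Minimal userspace usage,
 * assuming a connected TCP socket fd:
 */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int example_set_fin_wait2_lifetime(int fd, int seconds)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_LINGER2,
			  &seconds, sizeof(seconds));
}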
1697
1698/* These states need RST on ABORT according to RFC793 */
1699
1700static inline int tcp_need_reset(int state)
1701{
1702 return (1 << state) &
1703 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1704 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1705}
1706
1707int tcp_disconnect(struct sock *sk, int flags)
1708{
1709 struct inet_sock *inet = inet_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001710 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711 struct tcp_sock *tp = tcp_sk(sk);
1712 int err = 0;
1713 int old_state = sk->sk_state;
1714
1715 if (old_state != TCP_CLOSE)
1716 tcp_set_state(sk, TCP_CLOSE);
1717
1718 /* ABORT function of RFC793 */
1719 if (old_state == TCP_LISTEN) {
Arnaldo Carvalho de Melo0a5578c2005-08-09 20:11:41 -07001720 inet_csk_listen_stop(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 } else if (tcp_need_reset(old_state) ||
1722 (tp->snd_nxt != tp->write_seq &&
1723 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
Stephen Hemmingercaa20d9a2005-11-10 17:13:47 -08001724		/* The last check adjusts for the discrepancy between the Linux
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725		 * and the RFC state machines (see the RED-PEN note in tcp_close)
1726		 */
1727 tcp_send_active_reset(sk, gfp_any());
1728 sk->sk_err = ECONNRESET;
1729 } else if (old_state == TCP_SYN_SENT)
1730 sk->sk_err = ECONNRESET;
1731
1732 tcp_clear_xmit_timers(sk);
1733 __skb_queue_purge(&sk->sk_receive_queue);
1734 sk_stream_writequeue_purge(sk);
1735 __skb_queue_purge(&tp->out_of_order_queue);
Chris Leech1a2449a2006-05-23 18:05:53 -07001736#ifdef CONFIG_NET_DMA
1737 __skb_queue_purge(&sk->sk_async_wait_queue);
1738#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001739
1740 inet->dport = 0;
1741
1742 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1743 inet_reset_saddr(sk);
1744
1745 sk->sk_shutdown = 0;
1746 sock_reset_flag(sk, SOCK_DONE);
1747 tp->srtt = 0;
1748 if ((tp->write_seq += tp->max_window + 2) == 0)
1749 tp->write_seq = 1;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001750 icsk->icsk_backoff = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001751 tp->snd_cwnd = 2;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001752 icsk->icsk_probes_out = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753 tp->packets_out = 0;
1754 tp->snd_ssthresh = 0x7fffffff;
1755 tp->snd_cwnd_cnt = 0;
Stephen Hemminger9772efb2005-11-10 17:09:53 -08001756 tp->bytes_acked = 0;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001757 tcp_set_ca_state(sk, TCP_CA_Open);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 tcp_clear_retrans(tp);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001759 inet_csk_delack_init(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760 sk->sk_send_head = NULL;
1761 tp->rx_opt.saw_tstamp = 0;
1762 tcp_sack_reset(&tp->rx_opt);
1763 __sk_dst_reset(sk);
1764
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001765 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001766
1767 sk->sk_error_report(sk);
1768 return err;
1769}
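
/*
 * Illustrative sketch, not part of tcp.c: one common way userspace reaches
 * tcp_disconnect() is by connect()ing an established TCP socket to an
 * address whose family is AF_UNSPEC, which the socket layer turns into a
 * protocol-level disconnect.  Minimal usage, assuming a connected socket fd:
 */
#include <string.h>
#include <sys/socket.h>

static int example_tcp_disconnect(int fd)
{
	struct sockaddr unspec;

	memset(&unspec, 0, sizeof(unspec));
	unspec.sa_family = AF_UNSPEC;	/* "dissolve the association" */
	return connect(fd, &unspec, sizeof(unspec));
}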
1770
1771/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001772 * Socket option code for TCP.
1773 */
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001774static int do_tcp_setsockopt(struct sock *sk, int level,
1775 int optname, char __user *optval, int optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
1777 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001778 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001779 int val;
1780 int err = 0;
1781
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001782	/* This is a string value; all the others are ints. */
1783 if (optname == TCP_CONGESTION) {
1784 char name[TCP_CA_NAME_MAX];
1785
1786 if (optlen < 1)
1787 return -EINVAL;
1788
1789 val = strncpy_from_user(name, optval,
1790 min(TCP_CA_NAME_MAX-1, optlen));
1791 if (val < 0)
1792 return -EFAULT;
1793 name[val] = 0;
1794
1795 lock_sock(sk);
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001796 err = tcp_set_congestion_control(sk, name);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07001797 release_sock(sk);
1798 return err;
1799 }
1800
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801 if (optlen < sizeof(int))
1802 return -EINVAL;
1803
1804 if (get_user(val, (int __user *)optval))
1805 return -EFAULT;
1806
1807 lock_sock(sk);
1808
1809 switch (optname) {
1810 case TCP_MAXSEG:
1811		/* Values greater than the interface MTU won't take effect. However,
1812		 * at the point when this call is made we typically don't yet
1813		 * know which interface is going to be used. */
1814 if (val < 8 || val > MAX_TCP_WINDOW) {
1815 err = -EINVAL;
1816 break;
1817 }
1818 tp->rx_opt.user_mss = val;
1819 break;
1820
1821 case TCP_NODELAY:
1822 if (val) {
1823 /* TCP_NODELAY is weaker than TCP_CORK, so that
1824 * this option on corked socket is remembered, but
1825 * it is not activated until cork is cleared.
1826 *
1827 * However, when TCP_NODELAY is set we make
1828 * an explicit push, which overrides even TCP_CORK
1829 * for currently queued segments.
1830 */
1831 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1832 tcp_push_pending_frames(sk, tp);
1833 } else {
1834 tp->nonagle &= ~TCP_NAGLE_OFF;
1835 }
1836 break;
1837
1838 case TCP_CORK:
1839		/* When set, this indicates that non-full frames should always be queued.
1840 * Later the user clears this option and we transmit
1841 * any pending partial frames in the queue. This is
1842 * meant to be used alongside sendfile() to get properly
1843 * filled frames when the user (for example) must write
1844 * out headers with a write() call first and then use
1845 * sendfile to send out the data parts.
1846 *
1847 * TCP_CORK can be set together with TCP_NODELAY and it is
1848 * stronger than TCP_NODELAY.
1849 */
1850 if (val) {
1851 tp->nonagle |= TCP_NAGLE_CORK;
1852 } else {
1853 tp->nonagle &= ~TCP_NAGLE_CORK;
1854 if (tp->nonagle&TCP_NAGLE_OFF)
1855 tp->nonagle |= TCP_NAGLE_PUSH;
1856 tcp_push_pending_frames(sk, tp);
1857 }
1858 break;
1859
1860 case TCP_KEEPIDLE:
1861 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1862 err = -EINVAL;
1863 else {
1864 tp->keepalive_time = val * HZ;
1865 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1866 !((1 << sk->sk_state) &
1867 (TCPF_CLOSE | TCPF_LISTEN))) {
1868 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1869 if (tp->keepalive_time > elapsed)
1870 elapsed = tp->keepalive_time - elapsed;
1871 else
1872 elapsed = 0;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001873 inet_csk_reset_keepalive_timer(sk, elapsed);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001874 }
1875 }
1876 break;
1877 case TCP_KEEPINTVL:
1878 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1879 err = -EINVAL;
1880 else
1881 tp->keepalive_intvl = val * HZ;
1882 break;
1883 case TCP_KEEPCNT:
1884 if (val < 1 || val > MAX_TCP_KEEPCNT)
1885 err = -EINVAL;
1886 else
1887 tp->keepalive_probes = val;
1888 break;
1889 case TCP_SYNCNT:
1890 if (val < 1 || val > MAX_TCP_SYNCNT)
1891 err = -EINVAL;
1892 else
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001893 icsk->icsk_syn_retries = val;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894 break;
1895
1896 case TCP_LINGER2:
1897 if (val < 0)
1898 tp->linger2 = -1;
1899 else if (val > sysctl_tcp_fin_timeout / HZ)
1900 tp->linger2 = 0;
1901 else
1902 tp->linger2 = val * HZ;
1903 break;
1904
1905 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07001906 icsk->icsk_accept_queue.rskq_defer_accept = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001907 if (val > 0) {
1908 /* Translate value in seconds to number of
1909 * retransmits */
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07001910 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001911 val > ((TCP_TIMEOUT_INIT / HZ) <<
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07001912 icsk->icsk_accept_queue.rskq_defer_accept))
1913 icsk->icsk_accept_queue.rskq_defer_accept++;
1914 icsk->icsk_accept_queue.rskq_defer_accept++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001915 }
1916 break;
1917
1918 case TCP_WINDOW_CLAMP:
1919 if (!val) {
1920 if (sk->sk_state != TCP_CLOSE) {
1921 err = -EINVAL;
1922 break;
1923 }
1924 tp->window_clamp = 0;
1925 } else
1926 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1927 SOCK_MIN_RCVBUF / 2 : val;
1928 break;
1929
1930 case TCP_QUICKACK:
1931 if (!val) {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001932 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 } else {
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001934 icsk->icsk_ack.pingpong = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001935 if ((1 << sk->sk_state) &
1936 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001937 inet_csk_ack_scheduled(sk)) {
1938 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
Chris Leech0e4b4992006-05-23 18:00:16 -07001939 tcp_cleanup_rbuf(sk, 1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940 if (!(val & 1))
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001941 icsk->icsk_ack.pingpong = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942 }
1943 }
1944 break;
1945
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08001946#ifdef CONFIG_TCP_MD5SIG
1947 case TCP_MD5SIG:
1948 /* Read the IP->Key mappings from userspace */
1949 err = tp->af_specific->md5_parse(sk, optval, optlen);
1950 break;
1951#endif
1952
Linus Torvalds1da177e2005-04-16 15:20:36 -07001953 default:
1954 err = -ENOPROTOOPT;
1955 break;
1956	}
1957 release_sock(sk);
1958 return err;
1959}
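
/*
 * Illustrative sketch, not part of tcp.c: the userspace pattern described
 * in the TCP_CORK comment above -- cork the socket, write() the headers,
 * sendfile() the body, then uncork so the last partial frame is pushed.
 * hdr/hdrlen and file_fd/file_len are caller-supplied placeholders.
 */
#include <sys/socket.h>
#include <sys/sendfile.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <unistd.h>

static int example_corked_sendfile(int sock, const void *hdr, size_t hdrlen,
				   int file_fd, size_t file_len)
{
	int on = 1, off = 0;

	if (setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on)) < 0)
		return -1;
	if (write(sock, hdr, hdrlen) < 0)	/* queued behind the cork */
		return -1;
	if (sendfile(sock, file_fd, NULL, file_len) < 0)
		return -1;
	/* clearing TCP_CORK transmits any pending partial frame */
	return setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
}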
1960
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001961int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1962 int optlen)
1963{
1964 struct inet_connection_sock *icsk = inet_csk(sk);
1965
1966 if (level != SOL_TCP)
1967 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1968 optval, optlen);
1969 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1970}
1971
1972#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001973int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1974 char __user *optval, int optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001975{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08001976 if (level != SOL_TCP)
1977 return inet_csk_compat_setsockopt(sk, level, optname,
1978 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001979 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1980}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08001981
1982EXPORT_SYMBOL(compat_tcp_setsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08001983#endif
1984
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985/* Return information about the state of a TCP endpoint in API format. */
1986void tcp_get_info(struct sock *sk, struct tcp_info *info)
1987{
1988 struct tcp_sock *tp = tcp_sk(sk);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001989 const struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 u32 now = tcp_time_stamp;
1991
1992 memset(info, 0, sizeof(*info));
1993
1994 info->tcpi_state = sk->sk_state;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001995 info->tcpi_ca_state = icsk->icsk_ca_state;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001996 info->tcpi_retransmits = icsk->icsk_retransmits;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03001997 info->tcpi_probes = icsk->icsk_probes_out;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07001998 info->tcpi_backoff = icsk->icsk_backoff;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001999
2000 if (tp->rx_opt.tstamp_ok)
2001 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2002 if (tp->rx_opt.sack_ok)
2003 info->tcpi_options |= TCPI_OPT_SACK;
2004 if (tp->rx_opt.wscale_ok) {
2005 info->tcpi_options |= TCPI_OPT_WSCALE;
2006 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2007 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2008 }
2009
2010 if (tp->ecn_flags&TCP_ECN_OK)
2011 info->tcpi_options |= TCPI_OPT_ECN;
2012
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002013 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2014 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002015 info->tcpi_snd_mss = tp->mss_cache;
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002016 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017
2018 info->tcpi_unacked = tp->packets_out;
2019 info->tcpi_sacked = tp->sacked_out;
2020 info->tcpi_lost = tp->lost_out;
2021 info->tcpi_retrans = tp->retrans_out;
2022 info->tcpi_fackets = tp->fackets_out;
2023
2024 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
Arnaldo Carvalho de Melo463c84b2005-08-09 20:10:42 -07002025 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002026 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2027
Arnaldo Carvalho de Melod83d8462005-12-13 23:26:10 -08002028 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2030 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2031 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2032 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2033 info->tcpi_snd_cwnd = tp->snd_cwnd;
2034 info->tcpi_advmss = tp->advmss;
2035 info->tcpi_reordering = tp->reordering;
2036
2037 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2038 info->tcpi_rcv_space = tp->rcvq_space.space;
2039
2040 info->tcpi_total_retrans = tp->total_retrans;
2041}
2042
2043EXPORT_SYMBOL_GPL(tcp_get_info);
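
/*
 * Illustrative sketch, not part of tcp.c: userspace reaches tcp_get_info()
 * through getsockopt(TCP_INFO) (see do_tcp_getsockopt() below).  Minimal
 * usage that prints a few of the fields filled in above:
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static void example_dump_tcp_info(int fd)
{
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("state %u rtt %u us rttvar %u us total_retrans %u\n",
		       info.tcpi_state, info.tcpi_rtt, info.tcpi_rttvar,
		       info.tcpi_total_retrans);
}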
2044
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002045static int do_tcp_getsockopt(struct sock *sk, int level,
2046 int optname, char __user *optval, int __user *optlen)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047{
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002048 struct inet_connection_sock *icsk = inet_csk(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049 struct tcp_sock *tp = tcp_sk(sk);
2050 int val, len;
2051
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 if (get_user(len, optlen))
2053 return -EFAULT;
2054
2055 len = min_t(unsigned int, len, sizeof(int));
2056
2057 if (len < 0)
2058 return -EINVAL;
2059
2060 switch (optname) {
2061 case TCP_MAXSEG:
David S. Millerc1b4a7e2005-07-05 15:24:38 -07002062 val = tp->mss_cache;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002063 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2064 val = tp->rx_opt.user_mss;
2065 break;
2066 case TCP_NODELAY:
2067 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2068 break;
2069 case TCP_CORK:
2070 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2071 break;
2072 case TCP_KEEPIDLE:
2073 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2074 break;
2075 case TCP_KEEPINTVL:
2076 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2077 break;
2078 case TCP_KEEPCNT:
2079 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2080 break;
2081 case TCP_SYNCNT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002082 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083 break;
2084 case TCP_LINGER2:
2085 val = tp->linger2;
2086 if (val >= 0)
2087 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2088 break;
2089 case TCP_DEFER_ACCEPT:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002090 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2091 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092 break;
2093 case TCP_WINDOW_CLAMP:
2094 val = tp->window_clamp;
2095 break;
2096 case TCP_INFO: {
2097 struct tcp_info info;
2098
2099 if (get_user(len, optlen))
2100 return -EFAULT;
2101
2102 tcp_get_info(sk, &info);
2103
2104 len = min_t(unsigned int, len, sizeof(info));
2105 if (put_user(len, optlen))
2106 return -EFAULT;
2107 if (copy_to_user(optval, &info, len))
2108 return -EFAULT;
2109 return 0;
2110 }
2111 case TCP_QUICKACK:
Arnaldo Carvalho de Melo295f7322005-08-09 20:11:56 -07002112 val = !icsk->icsk_ack.pingpong;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113 break;
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002114
2115 case TCP_CONGESTION:
2116 if (get_user(len, optlen))
2117 return -EFAULT;
2118 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2119 if (put_user(len, optlen))
2120 return -EFAULT;
Arnaldo Carvalho de Melo6687e982005-08-10 04:03:31 -03002121 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002122 return -EFAULT;
2123 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 default:
2125 return -ENOPROTOOPT;
2126	}
2127
2128 if (put_user(len, optlen))
2129 return -EFAULT;
2130 if (copy_to_user(optval, &val, len))
2131 return -EFAULT;
2132 return 0;
2133}
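
/*
 * Illustrative sketch, not part of tcp.c: TCP_CONGESTION is the one
 * string-valued option handled above and in do_tcp_setsockopt(); the
 * getter returns the current congestion control name, the setter selects
 * one by name.  Assumes TCP_CONGESTION is visible to userspace (older
 * libc headers may require <linux/tcp.h> or a manual define).
 */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int example_set_congestion_control(int fd, const char *name)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
			  name, strlen(name));
}

static int example_get_congestion_control(int fd, char *buf, socklen_t buflen)
{
	return getsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, buf, &buflen);
}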
2134
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002135int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2136 int __user *optlen)
2137{
2138 struct inet_connection_sock *icsk = inet_csk(sk);
2139
2140 if (level != SOL_TCP)
2141 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2142 optval, optlen);
2143 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2144}
2145
2146#ifdef CONFIG_COMPAT
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002147int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2148 char __user *optval, int __user *optlen)
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002149{
Arnaldo Carvalho de Melodec73ff2006-03-20 22:46:16 -08002150 if (level != SOL_TCP)
2151 return inet_csk_compat_getsockopt(sk, level, optname,
2152 optval, optlen);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002153 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2154}
Arnaldo Carvalho de Melo543d9cf2006-03-20 22:48:35 -08002155
2156EXPORT_SYMBOL(compat_tcp_getsockopt);
Dmitry Mishin3fdadf72006-03-20 22:45:21 -08002157#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158
Herbert Xu576a30e2006-06-27 13:22:38 -07002159struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
Herbert Xuf4c50d92006-06-22 03:02:40 -07002160{
2161 struct sk_buff *segs = ERR_PTR(-EINVAL);
2162 struct tcphdr *th;
2163 unsigned thlen;
2164 unsigned int seq;
Al Virod3bc23e2006-11-14 21:24:49 -08002165 __be32 delta;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002166 unsigned int oldlen;
2167 unsigned int len;
2168
2169 if (!pskb_may_pull(skb, sizeof(*th)))
2170 goto out;
2171
2172 th = skb->h.th;
2173 thlen = th->doff * 4;
2174 if (thlen < sizeof(*th))
2175 goto out;
2176
2177 if (!pskb_may_pull(skb, thlen))
2178 goto out;
2179
Herbert Xu0718bcc2006-06-25 23:55:46 -07002180 oldlen = (u16)~skb->len;
Herbert Xuf4c50d92006-06-22 03:02:40 -07002181 __skb_pull(skb, thlen);
2182
Herbert Xu3820c3f2006-06-29 20:11:25 -07002183 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2184 /* Packet is from an untrusted source, reset gso_segs. */
Herbert Xubbcf4672006-07-03 19:38:35 -07002185 int type = skb_shinfo(skb)->gso_type;
2186 int mss;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002187
Herbert Xubbcf4672006-07-03 19:38:35 -07002188 if (unlikely(type &
2189 ~(SKB_GSO_TCPV4 |
2190 SKB_GSO_DODGY |
2191 SKB_GSO_TCP_ECN |
2192 SKB_GSO_TCPV6 |
2193 0) ||
2194 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2195 goto out;
2196
2197 mss = skb_shinfo(skb)->gso_size;
Herbert Xu3820c3f2006-06-29 20:11:25 -07002198 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2199
2200 segs = NULL;
2201 goto out;
2202 }
2203
Herbert Xu576a30e2006-06-27 13:22:38 -07002204 segs = skb_segment(skb, features);
Herbert Xuf4c50d92006-06-22 03:02:40 -07002205 if (IS_ERR(segs))
2206 goto out;
2207
2208 len = skb_shinfo(skb)->gso_size;
Herbert Xu0718bcc2006-06-25 23:55:46 -07002209 delta = htonl(oldlen + (thlen + len));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002210
2211 skb = segs;
2212 th = skb->h.th;
2213 seq = ntohl(th->seq);
2214
2215 do {
2216 th->fin = th->psh = 0;
2217
Al Virod3bc23e2006-11-14 21:24:49 -08002218 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2219 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002220 if (skb->ip_summed != CHECKSUM_PARTIAL)
Herbert Xu0718bcc2006-06-25 23:55:46 -07002221 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2222 skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002223
2224 seq += len;
2225 skb = skb->next;
2226 th = skb->h.th;
2227
2228 th->seq = htonl(seq);
2229 th->cwr = 0;
2230 } while (skb->next);
2231
Herbert Xu0718bcc2006-06-25 23:55:46 -07002232 delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
Al Virod3bc23e2006-11-14 21:24:49 -08002233 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2234 (__force u32)delta));
Patrick McHardy84fa7932006-08-29 16:44:56 -07002235 if (skb->ip_summed != CHECKSUM_PARTIAL)
Herbert Xu0718bcc2006-06-25 23:55:46 -07002236 th->check = csum_fold(csum_partial(skb->h.raw, thlen,
2237 skb->csum));
Herbert Xuf4c50d92006-06-22 03:02:40 -07002238
2239out:
2240 return segs;
2241}
Herbert Xuadcfc7d2006-06-30 13:36:15 -07002242EXPORT_SYMBOL(tcp_tso_segment);
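
/*
 * Illustrative sketch, not part of tcp.c: the th->check arithmetic above is
 * the standard incremental checksum update (RFC 1624).  When a 16-bit
 * quantity covered by the checksum changes from old to new -- here the TCP
 * length in the pseudo-header shrinking from the original super-packet
 * length to the per-segment length -- the checksum can be patched as
 * HC' = ~(~HC + ~old + new) instead of being recomputed.  A standalone
 * sketch of that identity:
 */
#include <stdint.h>

static uint16_t example_csum_update16(uint16_t check,
				      uint16_t old_val, uint16_t new_val)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~old_val;
	sum += new_val;
	/* fold carries back into 16 bits (one's-complement addition) */
	sum = (sum & 0xffff) + (sum >> 16);
	sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}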
Herbert Xuf4c50d92006-06-22 03:02:40 -07002243
YOSHIFUJI Hideakicfb6eeb2006-11-14 19:07:45 -08002244#ifdef CONFIG_TCP_MD5SIG
2245static unsigned long tcp_md5sig_users;
2246static struct tcp_md5sig_pool **tcp_md5sig_pool;
2247static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2248
2249static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2250{
2251 int cpu;
2252 for_each_possible_cpu(cpu) {
2253 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2254 if (p) {
2255 if (p->md5_desc.tfm)
2256 crypto_free_hash(p->md5_desc.tfm);
2257 kfree(p);
2258 p = NULL;
2259 }
2260 }
2261 free_percpu(pool);
2262}
2263
2264void tcp_free_md5sig_pool(void)
2265{
2266 struct tcp_md5sig_pool **pool = NULL;
2267
2268 spin_lock(&tcp_md5sig_pool_lock);
2269 if (--tcp_md5sig_users == 0) {
2270 pool = tcp_md5sig_pool;
2271 tcp_md5sig_pool = NULL;
2272 }
2273 spin_unlock(&tcp_md5sig_pool_lock);
2274 if (pool)
2275 __tcp_free_md5sig_pool(pool);
2276}
2277
2278EXPORT_SYMBOL(tcp_free_md5sig_pool);
2279
2280struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2281{
2282 int cpu;
2283 struct tcp_md5sig_pool **pool;
2284
2285 pool = alloc_percpu(struct tcp_md5sig_pool *);
2286 if (!pool)
2287 return NULL;
2288
2289 for_each_possible_cpu(cpu) {
2290 struct tcp_md5sig_pool *p;
2291 struct crypto_hash *hash;
2292
2293 p = kzalloc(sizeof(*p), GFP_KERNEL);
2294 if (!p)
2295 goto out_free;
2296 *per_cpu_ptr(pool, cpu) = p;
2297
2298 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2299 if (!hash || IS_ERR(hash))
2300 goto out_free;
2301
2302 p->md5_desc.tfm = hash;
2303 }
2304 return pool;
2305out_free:
2306 __tcp_free_md5sig_pool(pool);
2307 return NULL;
2308}
2309
2310struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2311{
2312 struct tcp_md5sig_pool **pool;
2313 int alloc = 0;
2314
2315retry:
2316 spin_lock(&tcp_md5sig_pool_lock);
2317 pool = tcp_md5sig_pool;
2318 if (tcp_md5sig_users++ == 0) {
2319 alloc = 1;
2320 spin_unlock(&tcp_md5sig_pool_lock);
2321 } else if (!pool) {
2322 tcp_md5sig_users--;
2323 spin_unlock(&tcp_md5sig_pool_lock);
2324 cpu_relax();
2325 goto retry;
2326 } else
2327 spin_unlock(&tcp_md5sig_pool_lock);
2328
2329 if (alloc) {
2330		/* we cannot hold the spinlock here because the allocation may sleep. */
2331 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2332 spin_lock(&tcp_md5sig_pool_lock);
2333 if (!p) {
2334 tcp_md5sig_users--;
2335 spin_unlock(&tcp_md5sig_pool_lock);
2336 return NULL;
2337 }
2338 pool = tcp_md5sig_pool;
2339 if (pool) {
2340 /* oops, it has already been assigned. */
2341 spin_unlock(&tcp_md5sig_pool_lock);
2342 __tcp_free_md5sig_pool(p);
2343 } else {
2344 tcp_md5sig_pool = pool = p;
2345 spin_unlock(&tcp_md5sig_pool_lock);
2346 }
2347 }
2348 return pool;
2349}
2350
2351EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
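
/*
 * Illustrative sketch, not part of tcp.c: tcp_alloc_md5sig_pool() above
 * follows a common pattern for refcounted, lazily allocated global state --
 * take the reference under the lock, drop the lock for the allocation
 * (which may sleep), then re-check under the lock and free the loser if
 * another caller won the race.  A stripped-down userspace analogue with a
 * hypothetical object type and pthread locking:
 */
#include <pthread.h>
#include <stdlib.h>

struct shared_obj { int payload; };

static pthread_mutex_t obj_lock = PTHREAD_MUTEX_INITIALIZER;
static struct shared_obj *obj;
static unsigned long obj_users;

static struct shared_obj *obj_get(void)
{
	struct shared_obj *p, *winner;

	pthread_mutex_lock(&obj_lock);
	obj_users++;
	winner = obj;
	pthread_mutex_unlock(&obj_lock);
	if (winner)
		return winner;

	p = calloc(1, sizeof(*p));	/* may sleep: done outside the lock */

	pthread_mutex_lock(&obj_lock);
	if (!p) {
		obj_users--;		/* allocation failed, drop our ref */
		pthread_mutex_unlock(&obj_lock);
		return NULL;
	}
	if (obj) {			/* someone else installed it first */
		winner = obj;
		pthread_mutex_unlock(&obj_lock);
		free(p);
		return winner;
	}
	obj = winner = p;
	pthread_mutex_unlock(&obj_lock);
	return winner;			/* a matching obj_put() would drop obj_users */
}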
2352
2353struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2354{
2355 struct tcp_md5sig_pool **p;
2356 spin_lock(&tcp_md5sig_pool_lock);
2357 p = tcp_md5sig_pool;
2358 if (p)
2359 tcp_md5sig_users++;
2360 spin_unlock(&tcp_md5sig_pool_lock);
2361 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2362}
2363
2364EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2365
2366void __tcp_put_md5sig_pool(void) {
2367	tcp_free_md5sig_pool();	/* drop the ref taken by __tcp_get_md5sig_pool() */
2368}
2369
2370EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2371#endif
2372
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373extern void __skb_cb_too_small_for_tcp(int, int);
Stephen Hemminger5f8ef482005-06-23 20:37:36 -07002374extern struct tcp_congestion_ops tcp_reno;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002375
2376static __initdata unsigned long thash_entries;
2377static int __init set_thash_entries(char *str)
2378{
2379 if (!str)
2380 return 0;
2381 thash_entries = simple_strtoul(str, &str, 0);
2382 return 1;
2383}
2384__setup("thash_entries=", set_thash_entries);
2385
2386void __init tcp_init(void)
2387{
2388 struct sk_buff *skb = NULL;
John Heffner7b4f4b52006-03-25 01:34:07 -08002389 unsigned long limit;
2390 int order, i, max_share;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391
2392 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2393 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2394 sizeof(skb->cb));
2395
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002396 tcp_hashinfo.bind_bucket_cachep =
2397 kmem_cache_create("tcp_bind_bucket",
2398 sizeof(struct inet_bind_bucket), 0,
Alexey Dobriyane5d679f332006-08-26 19:25:52 -07002399 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002400
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 /* Size and allocate the main established and bind bucket
2402 * hash tables.
2403 *
2404 * The methodology is similar to that of the buffer cache.
2405 */
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002406 tcp_hashinfo.ehash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 alloc_large_system_hash("TCP established",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002408 sizeof(struct inet_ehash_bucket),
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409 thash_entries,
2410 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002411 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002412 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002413 &tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002414 NULL,
2415 0);
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002416 tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
2417 for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
2418 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2419 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002420 }
2421
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002422 tcp_hashinfo.bhash =
Linus Torvalds1da177e2005-04-16 15:20:36 -07002423 alloc_large_system_hash("TCP bind",
Arnaldo Carvalho de Melo0f7ff922005-08-09 19:59:44 -07002424 sizeof(struct inet_bind_hashbucket),
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002425 tcp_hashinfo.ehash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002426 (num_physpages >= 128 * 1024) ?
Mike Stroyan18955cf2005-11-29 16:12:55 -08002427 13 : 15,
John Heffner9e950ef2006-11-06 23:10:51 -08002428 0,
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002429 &tcp_hashinfo.bhash_size,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002430 NULL,
2431 64 * 1024);
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002432 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2433 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2434 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2435 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436 }
2437
2438 /* Try to be a bit smarter and adjust defaults depending
2439 * on available memory.
2440 */
2441 for (order = 0; ((1 << order) << PAGE_SHIFT) <
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002442 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 order++)
2444 ;
Andi Kleene7626482005-06-13 14:24:52 -07002445 if (order >= 4) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 sysctl_local_port_range[0] = 32768;
2447 sysctl_local_port_range[1] = 61000;
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002448 tcp_death_row.sysctl_max_tw_buckets = 180000;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002449 sysctl_tcp_max_orphans = 4096 << (order - 4);
2450 sysctl_max_syn_backlog = 1024;
2451 } else if (order < 3) {
2452 sysctl_local_port_range[0] = 1024 * (3 - order);
Arnaldo Carvalho de Melo295ff7e2005-08-09 20:44:40 -07002453 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 sysctl_tcp_max_orphans >>= (3 - order);
2455 sysctl_max_syn_backlog = 128;
2456 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457
John Heffner52bf3762006-11-14 20:25:17 -08002458	/* Allow no more than 3/4 of kernel memory (usually less) to be allocated to TCP */
2459 sysctl_tcp_mem[0] = (1536 / sizeof (struct inet_bind_hashbucket)) << order;
2460 sysctl_tcp_mem[1] = sysctl_tcp_mem[0] * 4 / 3;
2461 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462
John Heffner7b4f4b52006-03-25 01:34:07 -08002463 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2464 max_share = min(4UL*1024*1024, limit);
2465
2466 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2467 sysctl_tcp_wmem[1] = 16*1024;
2468 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2469
2470 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2471 sysctl_tcp_rmem[1] = 87380;
2472 sysctl_tcp_rmem[2] = max(87380, max_share);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473
2474 printk(KERN_INFO "TCP: Hash tables configured "
2475 "(established %d bind %d)\n",
Arnaldo Carvalho de Melo6e04e022005-08-09 20:07:35 -07002476 tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);
Stephen Hemminger317a76f2005-06-23 12:19:55 -07002477
2478 tcp_register_congestion_control(&tcp_reno);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479}
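
/*
 * Illustrative sketch, not part of tcp.c: the per-socket buffer ceilings
 * chosen above are derived from sysctl_tcp_mem[1] -- each socket may use
 * at most 1/128 of the memory-pressure threshold (converted from pages to
 * bytes), further capped at 4 MB.  The same arithmetic, standalone, with
 * hypothetical values for the page size and tcp_mem[1]:
 */
#include <stdio.h>

static void example_tcp_buffer_ceilings(void)
{
	const unsigned int page_shift = 12;	/* hypothetical 4 KiB pages */
	unsigned long tcp_mem_pressure = 49152;	/* hypothetical tcp_mem[1], in pages */
	unsigned long limit, max_share, wmem2, rmem2;

	limit = tcp_mem_pressure << (page_shift - 7);	/* pages -> bytes, / 128 */
	max_share = limit < 4UL * 1024 * 1024 ? limit : 4UL * 1024 * 1024;

	wmem2 = 64UL * 1024 > max_share ? 64UL * 1024 : max_share;
	rmem2 = 87380UL > max_share ? 87380UL : max_share;

	printf("tcp_wmem[2]=%lu tcp_rmem[2]=%lu\n", wmem2, rmem2);
}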
2480
Linus Torvalds1da177e2005-04-16 15:20:36 -07002481EXPORT_SYMBOL(tcp_close);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482EXPORT_SYMBOL(tcp_disconnect);
2483EXPORT_SYMBOL(tcp_getsockopt);
2484EXPORT_SYMBOL(tcp_ioctl);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002485EXPORT_SYMBOL(tcp_poll);
2486EXPORT_SYMBOL(tcp_read_sock);
2487EXPORT_SYMBOL(tcp_recvmsg);
2488EXPORT_SYMBOL(tcp_sendmsg);
2489EXPORT_SYMBOL(tcp_sendpage);
2490EXPORT_SYMBOL(tcp_setsockopt);
2491EXPORT_SYMBOL(tcp_shutdown);
2492EXPORT_SYMBOL(tcp_statistics);