/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system. INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:     $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *      Alan Cox        :       Numerous verify_area() calls
 *      Alan Cox        :       Set the ACK bit on a reset
 *      Alan Cox        :       Stopped it crashing if it closed while
 *                              sk->inuse=1 and was trying to connect
 *                              (tcp_err()).
 *      Alan Cox        :       All icmp error handling was broken
 *                              pointers passed were wrong and the
 *                              socket was looked up backwards. Nobody
 *                              tested any icmp error code obviously.
 *      Alan Cox        :       tcp_err() now handled properly. It
 *                              wakes people on errors. poll
 *                              behaves and the icmp error race
 *                              has gone by moving it into sock.c
 *      Alan Cox        :       tcp_send_reset() fixed to work for
 *                              everything not just packets for
 *                              unknown sockets.
 *      Alan Cox        :       tcp option processing.
 *      Alan Cox        :       Reset tweaked (still not 100%) [Had
 *                              syn rule wrong]
 *      Herp Rosmanith  :       More reset fixes
 *      Alan Cox        :       No longer acks invalid rst frames.
 *                              Acking any kind of RST is right out.
 *      Alan Cox        :       Sets an ignore me flag on an rst
 *                              receive otherwise odd bits of prattle
 *                              escape still
 *      Alan Cox        :       Fixed another acking RST frame bug.
 *                              Should stop LAN workplace lockups.
 *      Alan Cox        :       Some tidyups using the new skb list
 *                              facilities
 *      Alan Cox        :       sk->keepopen now seems to work
 *      Alan Cox        :       Pulls options out correctly on accepts
 *      Alan Cox        :       Fixed assorted sk->rqueue->next errors
 *      Alan Cox        :       PSH doesn't end a TCP read. Switched a
 *                              bit to skb ops.
 *      Alan Cox        :       Tidied tcp_data to avoid a potential
 *                              nasty.
 *      Alan Cox        :       Added some better commenting, as the
 *                              tcp is hard to follow
 *      Alan Cox        :       Removed incorrect check for 20 * psh
 *      Michael O'Reilly:       ack < copied bug fix.
 *      Johannes Stille :       Misc tcp fixes (not all in yet).
 *      Alan Cox        :       FIN with no memory -> CRASH
 *      Alan Cox        :       Added socket option proto entries.
 *                              Also added awareness of them to accept.
 *      Alan Cox        :       Added TCP options (SOL_TCP)
 *      Alan Cox        :       Switched wakeup calls to callbacks,
 *                              so the kernel can layer network
 *                              sockets.
 *      Alan Cox        :       Use ip_tos/ip_ttl settings.
 *      Alan Cox        :       Handle FIN (more) properly (we hope).
 *      Alan Cox        :       RST frames sent on unsynchronised
 *                              state ack error.
 *      Alan Cox        :       Put in missing check for SYN bit.
 *      Alan Cox        :       Added tcp_select_window() aka NET2E
 *                              window non shrink trick.
 *      Alan Cox        :       Added a couple of small NET2E timer
 *                              fixes
 *      Charles Hedrick :       TCP fixes
 *      Toomas Tamm     :       TCP window fixes
 *      Alan Cox        :       Small URG fix to rlogin ^C ack fight
 *      Charles Hedrick :       Rewrote most of it to actually work
 *      Linus           :       Rewrote tcp_read() and URG handling
 *                              completely
 *      Gerhard Koerting:       Fixed some missing timer handling
 *      Matthew Dillon  :       Reworked TCP machine states as per RFC
 *      Gerhard Koerting:       PC/TCP workarounds
 *      Adam Caldwell   :       Assorted timer/timing errors
 *      Matthew Dillon  :       Fixed another RST bug
 *      Alan Cox        :       Move to kernel side addressing changes.
 *      Alan Cox        :       Beginning work on TCP fastpathing
 *                              (not yet usable)
 *      Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 *      Alan Cox        :       TCP fast path debugging
 *      Alan Cox        :       Window clamping
 *      Michael Riepe   :       Bug in tcp_check()
 *      Matt Dillon     :       More TCP improvements and RST bug fixes
 *      Matt Dillon     :       Yet more small nasties removed from the
 *                              TCP code (Be very nice to this man if
 *                              tcp finally works 100%) 8)
 *      Alan Cox        :       BSD accept semantics.
 *      Alan Cox        :       Reset on closedown bug.
 *      Peter De Schrijver :    ENOTCONN check missing in tcp_sendto().
 *      Michael Pall    :       Handle poll() after URG properly in
 *                              all cases.
 *      Michael Pall    :       Undo the last fix in tcp_read_urg()
 *                              (multi URG PUSH broke rlogin).
 *      Michael Pall    :       Fix the multi URG PUSH problem in
 *                              tcp_readable(), poll() after URG
 *                              works now.
 *      Michael Pall    :       recv(...,MSG_OOB) never blocks in the
 *                              BSD api.
 *      Alan Cox        :       Changed the semantics of sk->socket to
 *                              fix a race and a signal problem with
 *                              accept() and async I/O.
 *      Alan Cox        :       Relaxed the rules on tcp_sendto().
 *      Yury Shevchuk   :       Really fixed accept() blocking problem.
 *      Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
 *                              clients/servers which listen in on
 *                              fixed ports.
 *      Alan Cox        :       Cleaned the above up and shrank it to
 *                              a sensible code size.
 *      Alan Cox        :       Self connect lockup fix.
 *      Alan Cox        :       No connect to multicast.
 *      Ross Biro       :       Close unaccepted children on master
 *                              socket close.
 *      Alan Cox        :       Reset tracing code.
 *      Alan Cox        :       Spurious resets on shutdown.
 *      Alan Cox        :       Giant 15 minute/60 second timer error
 *      Alan Cox        :       Small whoops in polling before an
 *                              accept.
 *      Alan Cox        :       Kept the state trace facility since
 *                              it's handy for debugging.
 *      Alan Cox        :       More reset handler fixes.
 *      Alan Cox        :       Started rewriting the code based on
 *                              the RFC's for other useful protocol
 *                              references see: Comer, KA9Q NOS, and
 *                              for a reference on the difference
 *                              between specifications and how BSD
 *                              works see the 4.4lite source.
 *      A.N.Kuznetsov   :       Don't time wait on completion of tidy
 *                              close.
 *      Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
 *      Linus Torvalds  :       Fixed BSD port reuse to work first syn
 *      Alan Cox        :       Reimplemented timers as per the RFC
 *                              and using multiple timers for sanity.
 *      Alan Cox        :       Small bug fixes, and a lot of new
 *                              comments.
 *      Alan Cox        :       Fixed dual reader crash by locking
 *                              the buffers (much like datagram.c)
 *      Alan Cox        :       Fixed stuck sockets in probe. A probe
 *                              now gets fed up of retrying without
 *                              (even a no space) answer.
 *      Alan Cox        :       Extracted closing code better
 *      Alan Cox        :       Fixed the closing state machine to
 *                              resemble the RFC.
 *      Alan Cox        :       More 'per spec' fixes.
 *      Jorge Cwik      :       Even faster checksumming.
 *      Alan Cox        :       tcp_data() doesn't ack illegal PSH
 *                              only frames. At least one pc tcp stack
 *                              generates them.
 *      Alan Cox        :       Cache last socket.
 *      Alan Cox        :       Per route irtt.
 *      Matt Day        :       poll()->select() match BSD precisely on error
 *      Alan Cox        :       New buffers
 *      Marc Tamsky     :       Various sk->prot->retransmits and
 *                              sk->retransmits misupdating fixed.
 *                              Fixed tcp_write_timeout: stuck close,
 *                              and TCP syn retries gets used now.
 *      Mark Yarvis     :       In tcp_read_wakeup(), don't send an
 *                              ack if state is TCP_CLOSED.
 *      Alan Cox        :       Look up device on a retransmit - routes may
 *                              change. Doesn't yet cope with MSS shrink right
 *                              but it's a start!
 *      Marc Tamsky     :       Closing in closing fixes.
 *      Mike Shaver     :       RFC1122 verifications.
 *      Alan Cox        :       rcv_saddr errors.
 *      Alan Cox        :       Block double connect().
 *      Alan Cox        :       Small hooks for enSKIP.
 *      Alexey Kuznetsov:       Path MTU discovery.
 *      Alan Cox        :       Support soft errors.
 *      Alan Cox        :       Fix MTU discovery pathological case
 *                              when the remote claims no mtu!
 *      Marc Tamsky     :       TCP_CLOSE fix.
 *      Colin (G3TNE)   :       Send a reset on syn ack replies in
 *                              window but wrong (fixes NT lpd problems)
 *      Pedro Roque     :       Better TCP window handling, delayed ack.
 *      Joerg Reuter    :       No modification of locked buffers in
 *                              tcp_do_retransmit()
 *      Eric Schenk     :       Changed receiver side silly window
 *                              avoidance algorithm to BSD style
 *                              algorithm. This doubles throughput
 *                              against machines running Solaris,
 *                              and seems to result in general
 *                              improvement.
 *      Stefan Magdalinski :    adjusted tcp_readable() to fix FIONREAD
 *      Willy Konynenberg :     Transparent proxying support.
 *      Mike McLagan    :       Routing by source
 *      Keith Owens     :       Do proper merging with partial SKB's in
 *                              tcp_do_sendmsg to avoid burstiness.
 *      Eric Schenk     :       Fix fast close down bug with
 *                              shutdown() followed by close().
 *      Andi Kleen      :       Make poll agree with SIGIO
 *      Salvatore Sanfilippo :  Support SO_LINGER with linger == 1 and
 *                              lingertime == 0 (RFC 793 ABORT Call)
 *      Hirokazu Takahashi :    Use copy_from_user() instead of
 *                              csum_and_copy_from_user() if possible.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *      TCP_SYN_SENT            sent a connection request, waiting for ack
 *
 *      TCP_SYN_RECV            received a connection request, sent ack,
 *                              waiting for final ack in three-way handshake.
 *
 *      TCP_ESTABLISHED         connection established
 *
 *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
 *                              transmission of remaining buffered data
 *
 *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
 *                              to shutdown
 *
 *      TCP_CLOSING             both sides have shutdown but we still have
 *                              data we have to finish sending
 *
 *      TCP_TIME_WAIT           timeout to catch resent junk before entering
 *                              closed, can only be entered from FIN_WAIT2
 *                              or CLOSING. Required because the other end
 *                              may not have gotten our last ACK causing it
 *                              to retransmit the data packet (which we ignore)
 *
 *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
 *                              us to finish writing our data and to shutdown
 *                              (we have to close() to move on to LAST_ACK)
 *
 *      TCP_LAST_ACK            our side has shutdown after remote has
 *                              shutdown. There may still be data in our
 *                              buffer that we have to finish sending
 *
 *      TCP_CLOSE               socket is finished
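 *
 *      As a concrete example of the table above: a typical active close
 *      walks ESTABLISHED -> FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE,
 *      while the passive side walks ESTABLISHED -> CLOSE_WAIT -> LAST_ACK
 *      -> CLOSE (per RFC 793).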
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;

atomic_t tcp_orphan_count = ATOMIC_INIT(0);

EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_mem[3] __read_mostly;
int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_mem);
EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_t tcp_memory_allocated;  /* Current allocated memory. */
atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */

EXPORT_SYMBOL(tcp_memory_allocated);
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the sk_stream_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure;

EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(void)
{
        if (!tcp_memory_pressure) {
                NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
}

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/*
 *      Wait for a TCP event.
 *
 *      Note that we don't need to lock the socket, as the upper poll layers
 *      take care of normal races (between the test and the event) and we don't
 *      go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;
        struct tcp_sock *tp = tcp_sk(sk);

        poll_wait(file, sk->sk_sleep, wait);
        if (sk->sk_state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
           by poll logic, and correct handling of state changes
           made by other threads is impossible in any case.
         */

        mask = 0;
        if (sk->sk_err)
                mask = POLLERR;

        /*
         * POLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
         * Some poll() documentation says that POLLHUP is incompatible
         * with the POLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
         * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making impossible poll() on write()
         * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
         * if and only if shutdown has been made in both directions.
         * Actually, it is interesting to look how Solaris and DUX
         * solve this dilemma. I would prefer, if POLLHUP were maskable,
         * then we could set it on SND_SHUTDOWN. BTW examples given
         * in Stevens' books assume exactly this behaviour, it explains
         * why POLLHUP is incompatible with POLLOUT.    --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected? */
        if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                /* Potential race condition. If read of tp below will
                 * escape above sk->sk_state, we can be illegally awakened
                 * in SYN_* states. */
                if ((tp->rcv_nxt != tp->copied_seq) &&
                    (tp->urg_seq != tp->copied_seq ||
                     tp->rcv_nxt != tp->copied_seq + 1 ||
                     sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                set_bit(SOCK_ASYNC_NOSPACE,
                                        &sk->sk_socket->flags);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost.
                                 */
                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                }

                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        }
        return mask;
}

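/* ioctl(2) handling for TCP sockets: SIOCINQ reports how many bytes can
 * be read without blocking, SIOCATMARK tests whether the read pointer
 * sits at the urgent mark, and SIOCOUTQ reports bytes queued but not
 * yet acknowledged by the peer.  A minimal userspace sketch (purely
 * illustrative, not part of this file):
 *
 *      int avail;
 *      if (ioctl(fd, SIOCINQ, &avail) == 0)
 *              printf("%d bytes readable\n", avail);
 */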
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;

        switch (cmd) {
        case SIOCINQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                lock_sock(sk);
                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else if (sock_flag(sk, SOCK_URGINLINE) ||
                         !tp->urg_data ||
                         before(tp->urg_seq, tp->copied_seq) ||
                         !before(tp->urg_seq, tp->rcv_nxt)) {
                        answ = tp->rcv_nxt - tp->copied_seq;

                        /* Subtract 1, if FIN is in queue. */
                        if (answ && !skb_queue_empty(&sk->sk_receive_queue))
                                answ -=
                    ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
                } else
                        answ = tp->urg_seq - tp->copied_seq;
                release_sock(sk);
                break;
        case SIOCATMARK:
                answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
        default:
                return -ENOIOCTLCMD;
        };

        return put_user(answ, (int __user *)arg);
}

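/* Mark the tail skb as end-of-record: set PSH so the receiver flushes
 * buffered data to the application, and remember write_seq so
 * forced_push() can tell how much has queued up since the last push.
 */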
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
        tp->pushed_seq = tp->write_seq;
}

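/* Force a push once more than half of the largest window the peer has
 * ever advertised sits unpushed in the write queue.
 */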
static inline int forced_push(struct tcp_sock *tp)
{
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

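/* Append a freshly allocated skb to the write queue, charge its memory
 * to the socket, and make it the send head if nothing else is pending.
 */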
static inline void skb_entail(struct sock *sk, struct tcp_sock *tp,
                              struct sk_buff *skb)
{
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

        skb->csum = 0;
        tcb->seq = tcb->end_seq = tp->write_seq;
        tcb->flags = TCPCB_FLAG_ACK;
        tcb->sacked = 0;
        skb_header_release(skb);
        __skb_queue_tail(&sk->sk_write_queue, skb);
        sk_charge_skb(sk, skb);
        if (!sk->sk_send_head)
                sk->sk_send_head = skb;
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;
}

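/* For MSG_OOB sends: enter urgent mode, point the urgent pointer at the
 * end of the queued data and tag the skb accordingly.
 */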
static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
                                struct sk_buff *skb)
{
        if (flags & MSG_OOB) {
                tp->urg_mode = 1;
                tp->snd_up = tp->write_seq;
                TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
        }
}

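/* Push out anything queued but unsent.  MSG_MORE keeps the connection
 * corked (Nagle-style coalescing, no forced PSH); otherwise the
 * caller's nonagle setting decides.
 */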
static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags,
                            int mss_now, int nonagle)
{
        if (sk->sk_send_head) {
                struct sk_buff *skb = sk->sk_write_queue.prev;
                if (!(flags & MSG_MORE) || forced_push(tp))
                        tcp_mark_push(tp, skb);
                tcp_mark_urg(tp, flags, skb);
                __tcp_push_pending_frames(sk, tp, mss_now,
                                          (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
        }
}

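/* Zero-copy engine behind tcp_sendpage(): rather than copying data into
 * the skb head, take a reference on each caller page and attach it to
 * the skb as a paged fragment, coalescing with the previous fragment
 * when the pages line up.
 */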
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
                         size_t psize, int flags)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int mss_now, size_goal;
        int err;
        ssize_t copied;
        long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
        size_goal = tp->xmit_size_goal;
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto do_error;

        while (psize > 0) {
                struct sk_buff *skb = sk->sk_write_queue.prev;
                struct page *page = pages[poffset / PAGE_SIZE];
                int copy, i, can_coalesce;
                int offset = poffset % PAGE_SIZE;
                int size = min_t(size_t, psize, PAGE_SIZE - offset);

                if (!sk->sk_send_head || (copy = size_goal - skb->len) <= 0) {
new_segment:
                        if (!sk_stream_memory_free(sk))
                                goto wait_for_sndbuf;

                        skb = sk_stream_alloc_pskb(sk, 0, 0,
                                                   sk->sk_allocation);
                        if (!skb)
                                goto wait_for_memory;

                        skb_entail(sk, tp, skb);
                        copy = size_goal;
                }

                if (copy > size)
                        copy = size;

                i = skb_shinfo(skb)->nr_frags;
                can_coalesce = skb_can_coalesce(skb, i, page, offset);
                if (!can_coalesce && i >= MAX_SKB_FRAGS) {
                        tcp_mark_push(tp, skb);
                        goto new_segment;
                }
                if (!sk_stream_wmem_schedule(sk, copy))
                        goto wait_for_memory;

                if (can_coalesce) {
                        skb_shinfo(skb)->frags[i - 1].size += copy;
                } else {
                        get_page(page);
                        skb_fill_page_desc(skb, i, page, offset, copy);
                }

                skb->len += copy;
                skb->data_len += copy;
                skb->truesize += copy;
                sk->sk_wmem_queued += copy;
                sk->sk_forward_alloc -= copy;
                skb->ip_summed = CHECKSUM_PARTIAL;
                tp->write_seq += copy;
                TCP_SKB_CB(skb)->end_seq += copy;
                skb_shinfo(skb)->gso_segs = 0;

                if (!copied)
                        TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

                copied += copy;
                poffset += copy;
                if (!(psize -= copy))
                        goto out;

                if (skb->len < mss_now || (flags & MSG_OOB))
                        continue;

                if (forced_push(tp)) {
                        tcp_mark_push(tp, skb);
                        __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
                } else if (skb == sk->sk_send_head)
                        tcp_push_one(sk, mss_now);
                continue;

wait_for_sndbuf:
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                if (copied)
                        tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

                if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                        goto do_error;

                mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
                size_goal = tp->xmit_size_goal;
        }

out:
        if (copied)
                tcp_push(sk, tp, flags, mss_now, tp->nonagle);
        return copied;

do_error:
        if (copied)
                goto out;
out_err:
        return sk_stream_error(sk, flags, err);
}

ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
                     size_t size, int flags)
{
        ssize_t res;
        struct sock *sk = sock->sk;

        if (!(sk->sk_route_caps & NETIF_F_SG) ||
            !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
                return sock_no_sendpage(sock, page, offset, size, flags);

        lock_sock(sk);
        TCP_CHECK_TIMER(sk);
        res = do_tcp_sendpages(sk, &page, offset, size, flags);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return res;
}

#define TCP_PAGE(sk)    (sk->sk_sndmsg_page)
#define TCP_OFF(sk)     (sk->sk_sndmsg_off)

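/* Decide how much linear (non-paged) space to reserve when allocating a
 * fresh skb: nothing extra for GSO-capable devices, a page-bounded head
 * for plain SG devices, and a full cached MSS otherwise.
 */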
static inline int select_size(struct sock *sk, struct tcp_sock *tp)
{
        int tmp = tp->mss_cache;

        if (sk->sk_route_caps & NETIF_F_SG) {
                if (sk_can_gso(sk))
                        tmp = 0;
                else {
                        int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

                        if (tmp >= pgbreak &&
                            tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
                                tmp = pgbreak;
                }
        }

        return tmp;
}

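/* Copy user data from the iovec into write-queue skbs and transmit.
 * Data goes into the skb head (sized by select_size()) when there is
 * tailroom, otherwise into the socket's cached page as a fragment;
 * each skb is filled up to size_goal before it is pushed out.
 */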
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t size)
{
        struct iovec *iov;
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;
        int iovlen, flags;
        int mss_now, size_goal;
        int err, copied;
        long timeo;

        lock_sock(sk);
        TCP_CHECK_TIMER(sk);

        flags = msg->msg_flags;
        timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

        /* Wait for a connection to finish. */
        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
                if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
                        goto out_err;

        /* This should be in poll */
        clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

        mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
        size_goal = tp->xmit_size_goal;

        /* Ok commence sending. */
        iovlen = msg->msg_iovlen;
        iov = msg->msg_iov;
        copied = 0;

        err = -EPIPE;
        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
                goto do_error;

        while (--iovlen >= 0) {
                int seglen = iov->iov_len;
                unsigned char __user *from = iov->iov_base;

                iov++;

                while (seglen > 0) {
                        int copy;

                        skb = sk->sk_write_queue.prev;

                        if (!sk->sk_send_head ||
                            (copy = size_goal - skb->len) <= 0) {

new_segment:
                                /* Allocate new segment. If the interface is SG,
                                 * allocate skb fitting to single page.
                                 */
                                if (!sk_stream_memory_free(sk))
                                        goto wait_for_sndbuf;

                                skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
                                                           0, sk->sk_allocation);
                                if (!skb)
                                        goto wait_for_memory;

                                /*
                                 * Check whether we can use HW checksum.
                                 */
                                if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
                                        skb->ip_summed = CHECKSUM_PARTIAL;

                                skb_entail(sk, tp, skb);
                                copy = size_goal;
                        }

                        /* Try to append data to the end of skb. */
                        if (copy > seglen)
                                copy = seglen;

                        /* Where to copy to? */
                        if (skb_tailroom(skb) > 0) {
                                /* We have some space in skb head. Superb! */
                                if (copy > skb_tailroom(skb))
                                        copy = skb_tailroom(skb);
                                if ((err = skb_add_data(skb, from, copy)) != 0)
                                        goto do_fault;
                        } else {
                                int merge = 0;
                                int i = skb_shinfo(skb)->nr_frags;
                                struct page *page = TCP_PAGE(sk);
                                int off = TCP_OFF(sk);

                                if (skb_can_coalesce(skb, i, page, off) &&
                                    off != PAGE_SIZE) {
                                        /* We can extend the last page
                                         * fragment. */
                                        merge = 1;
                                } else if (i == MAX_SKB_FRAGS ||
                                           (!i &&
                                           !(sk->sk_route_caps & NETIF_F_SG))) {
                                        /* Need to add new fragment and cannot
                                         * do this because interface is non-SG,
                                         * or because all the page slots are
                                         * busy. */
                                        tcp_mark_push(tp, skb);
                                        goto new_segment;
                                } else if (page) {
                                        if (off == PAGE_SIZE) {
                                                put_page(page);
                                                TCP_PAGE(sk) = page = NULL;
                                                off = 0;
                                        }
                                } else
                                        off = 0;

                                if (copy > PAGE_SIZE - off)
                                        copy = PAGE_SIZE - off;

                                if (!sk_stream_wmem_schedule(sk, copy))
                                        goto wait_for_memory;

                                if (!page) {
                                        /* Allocate new cache page. */
                                        if (!(page = sk_stream_alloc_page(sk)))
                                                goto wait_for_memory;
                                }

                                /* Time to copy data. We are close to
                                 * the end! */
                                err = skb_copy_to_page(sk, from, skb, page,
                                                       off, copy);
                                if (err) {
                                        /* If this page was new, give it to the
                                         * socket so it does not get leaked.
                                         */
                                        if (!TCP_PAGE(sk)) {
                                                TCP_PAGE(sk) = page;
                                                TCP_OFF(sk) = 0;
                                        }
                                        goto do_error;
                                }

                                /* Update the skb. */
                                if (merge) {
                                        skb_shinfo(skb)->frags[i - 1].size += copy;
                                } else {
                                        skb_fill_page_desc(skb, i, page, off, copy);
                                        if (TCP_PAGE(sk)) {
                                                get_page(page);
                                        } else if (off + copy < PAGE_SIZE) {
                                                get_page(page);
                                                TCP_PAGE(sk) = page;
                                        }
                                }

                                TCP_OFF(sk) = off + copy;
                        }

                        if (!copied)
                                TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;

                        tp->write_seq += copy;
                        TCP_SKB_CB(skb)->end_seq += copy;
                        skb_shinfo(skb)->gso_segs = 0;

                        from += copy;
                        copied += copy;
                        if ((seglen -= copy) == 0 && iovlen == 0)
                                goto out;

                        if (skb->len < mss_now || (flags & MSG_OOB))
                                continue;

                        if (forced_push(tp)) {
                                tcp_mark_push(tp, skb);
                                __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
                        } else if (skb == sk->sk_send_head)
                                tcp_push_one(sk, mss_now);
                        continue;

wait_for_sndbuf:
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
                        if (copied)
                                tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

                        if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
                                goto do_error;

                        mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
                        size_goal = tp->xmit_size_goal;
                }
        }

out:
        if (copied)
                tcp_push(sk, tp, flags, mss_now, tp->nonagle);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;

do_fault:
        if (!skb->len) {
                if (sk->sk_send_head == skb)
                        sk->sk_send_head = NULL;
                __skb_unlink(skb, &sk->sk_write_queue);
                sk_stream_free_skb(sk, skb);
        }

do_error:
        if (copied)
                goto out;
out_err:
        err = sk_stream_error(sk, flags, err);
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;
}

/*
 *      Handle reading urgent data. BSD has very simple semantics for
 *      this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, long timeo,
                        struct msghdr *msg, int len, int flags,
                        int *addr_len)
{
        struct tcp_sock *tp = tcp_sk(sk);

        /* No URG data to read. */
        if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
            tp->urg_data == TCP_URG_READ)
                return -EINVAL; /* Yes this is right ! */

        if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
                return -ENOTCONN;

        if (tp->urg_data & TCP_URG_VALID) {
                int err = 0;
                char c = tp->urg_data;

                if (!(flags & MSG_PEEK))
                        tp->urg_data = TCP_URG_READ;

                /* Read urgent data. */
                msg->msg_flags |= MSG_OOB;

                if (len > 0) {
                        if (!(flags & MSG_TRUNC))
                                err = memcpy_toiovec(msg->msg_iov, &c, 1);
                        len = 1;
                } else
                        msg->msg_flags |= MSG_TRUNC;

                return err ? -EFAULT : len;
        }

        if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
                return 0;

        /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
         * the available implementations agree in this case:
         * this call should never block, independent of the
         * blocking state of the socket.
         * Mike <pall@rz.uni-karlsruhe.de>
         */
        return -EAGAIN;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary. COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int time_to_ack = 0;

#if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

        BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
#endif

        if (inet_csk_ack_scheduled(sk)) {
                const struct inet_connection_sock *icsk = inet_csk(sk);
                /* Delayed ACKs frequently hit locked sockets during bulk
                 * receive. */
                if (icsk->icsk_ack.blocked ||
                    /* Once-per-two-segments ACK was not sent by tcp_input.c */
                    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
                    /*
                     * If this read emptied read buffer, we send ACK, if
                     * connection is not bidirectional, user drained
                     * receive buffer and there was a small segment
                     * in queue.
                     */
                    (copied > 0 &&
                     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
                       !icsk->icsk_ack.pingpong)) &&
                      !atomic_read(&sk->sk_rmem_alloc)))
                        time_to_ack = 1;
        }

        /* We send an ACK if we can now advertise a non-zero window
         * which has been raised "significantly".
         *
         * Even if window raised up to infinity, do not send window open ACK
         * in states, where we will not receive more. It is useless.
         */
        if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                __u32 rcv_window_now = tcp_receive_window(tp);

                /* Optimize, __tcp_select_window() is not cheap. */
                if (2*rcv_window_now <= tp->window_clamp) {
                        __u32 new_window = __tcp_select_window(sk);

                        /* Send ACK now, if this read freed lots of space
                         * in our buffer. Certainly, new_window is new window.
                         * We can advertise it now, if it is not less than current one.
                         * "Lots" means "at least twice" here.
                         */
                        if (new_window && new_window >= 2 * rcv_window_now)
                                time_to_ack = 1;
                }
        }
        if (time_to_ack)
                tcp_send_ack(sk);
}

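/* Drain the prequeue: run every deferred segment through the normal
 * receive path now that user context holds the socket, then reset the
 * prequeue memory accounting.
 */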
static void tcp_prequeue_process(struct sock *sk)
{
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);

        NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);

        /* RX process wants to run with disabled BHs, though it is not
         * necessary */
        local_bh_disable();
        while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
                sk->sk_backlog_rcv(sk, skb);
        local_bh_enable();

        /* Clear memory counter. */
        tp->ucopy.memory = 0;
}

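/* Locate the skb in the receive queue covering sequence number SEQ and
 * return the offset of SEQ within it.  A SYN occupies one sequence
 * number but carries no data, hence the offset adjustment.
 */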
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
        struct sk_buff *skb;
        u32 offset;

        skb_queue_walk(&sk->sk_receive_queue, skb) {
                offset = seq - TCP_SKB_CB(skb)->seq;
                if (skb->h.th->syn)
                        offset--;
                if (offset < skb->len || skb->h.th->fin) {
                        *off = offset;
                        return skb;
                }
        }
        return NULL;
}

/*
 * This routine provides an alternative to tcp_recvmsg() for routines
 * that would like to handle copying from skbuffs directly in 'sendfile'
 * fashion.
 * Note:
 *      - It is assumed that the socket was locked by the caller.
 *      - The routine does not block.
 *      - At present, there is no support for reading OOB data
 *        or for 'peeking' the socket using this routine
 *        (although both would be easy to implement).
 */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                  sk_read_actor_t recv_actor)
{
        struct sk_buff *skb;
        struct tcp_sock *tp = tcp_sk(sk);
        u32 seq = tp->copied_seq;
        u32 offset;
        int copied = 0;

        if (sk->sk_state == TCP_LISTEN)
                return -ENOTCONN;
        while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
                if (offset < skb->len) {
                        size_t used, len;

                        len = skb->len - offset;
                        /* Stop reading if we hit a patch of urgent data */
                        if (tp->urg_data) {
                                u32 urg_offset = tp->urg_seq - seq;
                                if (urg_offset < len)
                                        len = urg_offset;
                                if (!len)
                                        break;
                        }
                        used = recv_actor(desc, skb, offset, len);
                        if (used <= len) {
                                seq += used;
                                copied += used;
                                offset += used;
                        }
                        if (offset != skb->len)
                                break;
                }
                if (skb->h.th->fin) {
                        sk_eat_skb(sk, skb, 0);
                        ++seq;
                        break;
                }
                sk_eat_skb(sk, skb, 0);
                if (!desc->count)
                        break;
        }
        tp->copied_seq = seq;

        tcp_rcv_space_adjust(sk);

        /* Clean up data we have read: This will do ACK frames. */
        if (copied)
                tcp_cleanup_rbuf(sk, copied);
        return copied;
}

/*
 * This routine copies from a sock struct into the user buffer.
 *
 * Technical note: in 2.3 we work on _locked_ socket, so that
 * tricks with *seq access order and skb->users are not required.
 * Probably, code can be easily improved even more.
 */

int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                size_t len, int nonblock, int flags, int *addr_len)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int copied = 0;
        u32 peek_seq;
        u32 *seq;
        unsigned long used;
        int err;
        int target;             /* Read at least this many bytes */
        long timeo;
        struct task_struct *user_recv = NULL;
        int copied_early = 0;

        lock_sock(sk);

        TCP_CHECK_TIMER(sk);

        err = -ENOTCONN;
        if (sk->sk_state == TCP_LISTEN)
                goto out;

        timeo = sock_rcvtimeo(sk, nonblock);

        /* Urgent data needs to be handled specially. */
        if (flags & MSG_OOB)
                goto recv_urg;

        seq = &tp->copied_seq;
        if (flags & MSG_PEEK) {
                peek_seq = tp->copied_seq;
                seq = &peek_seq;
        }

        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);

#ifdef CONFIG_NET_DMA
        tp->ucopy.dma_chan = NULL;
        preempt_disable();
        if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
            !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
                preempt_enable_no_resched();
                tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
        } else
                preempt_enable_no_resched();
#endif

        do {
                struct sk_buff *skb;
                u32 offset;

                /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
                if (tp->urg_data && tp->urg_seq == *seq) {
                        if (copied)
                                break;
                        if (signal_pending(current)) {
                                copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
                                break;
                        }
                }

                /* Next get a buffer. */

                skb = skb_peek(&sk->sk_receive_queue);
                do {
                        if (!skb)
                                break;

                        /* Now that we have two receive queues this
                         * shouldn't happen.
                         */
                        if (before(*seq, TCP_SKB_CB(skb)->seq)) {
                                printk(KERN_INFO "recvmsg bug: copied %X "
                                       "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
                                break;
                        }
                        offset = *seq - TCP_SKB_CB(skb)->seq;
                        if (skb->h.th->syn)
                                offset--;
                        if (offset < skb->len)
                                goto found_ok_skb;
                        if (skb->h.th->fin)
                                goto found_fin_ok;
                        BUG_TRAP(flags & MSG_PEEK);
                        skb = skb->next;
                } while (skb != (struct sk_buff *)&sk->sk_receive_queue);

                /* Well, if we have backlog, try to process it now. */

                if (copied >= target && !sk->sk_backlog.tail)
                        break;

                if (copied) {
                        if (sk->sk_err ||
                            sk->sk_state == TCP_CLOSE ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            !timeo ||
                            signal_pending(current) ||
                            (flags & MSG_PEEK))
                                break;
                } else {
                        if (sock_flag(sk, SOCK_DONE))
                                break;

                        if (sk->sk_err) {
                                copied = sock_error(sk);
                                break;
                        }

                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;

                        if (sk->sk_state == TCP_CLOSE) {
                                if (!sock_flag(sk, SOCK_DONE)) {
                                        /* This occurs when user tries to read
                                         * from never connected socket.
                                         */
                                        copied = -ENOTCONN;
                                        break;
                                }
                                break;
                        }

                        if (!timeo) {
                                copied = -EAGAIN;
                                break;
                        }

                        if (signal_pending(current)) {
                                copied = sock_intr_errno(timeo);
                                break;
                        }
                }

                tcp_cleanup_rbuf(sk, copied);

                if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
                        /* Install new reader */
                        if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
                                user_recv = current;
                                tp->ucopy.task = user_recv;
                                tp->ucopy.iov = msg->msg_iov;
                        }

                        tp->ucopy.len = len;

                        BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
                                 (flags & (MSG_PEEK | MSG_TRUNC)));

                        /* Ugly... If prequeue is not empty, we have to
                         * process it before releasing socket, otherwise
                         * order will be broken at second iteration.
                         * More elegant solution is required!!!
                         *
                         * Look: we have the following (pseudo)queues:
                         *
                         * 1. packets in flight
                         * 2. backlog
                         * 3. prequeue
                         * 4. receive_queue
                         *
                         * Each queue can be processed only if the next ones
                         * are empty. At this point we have empty receive_queue.
                         * But prequeue _can_ be not empty after 2nd iteration,
                         * when we jumped to start of loop because backlog
                         * processing added something to receive_queue.
                         * We cannot release_sock(), because backlog contains
                         * packets arrived _after_ prequeued ones.
                         *
                         * Shortly, algorithm is clear --- to process all
                         * the queues in order. We could make it more directly,
                         * requeueing packets from backlog to prequeue, if
                         * is not empty. It is more elegant, but eats cycles,
                         * unfortunately.
                         */
                        if (!skb_queue_empty(&tp->ucopy.prequeue))
                                goto do_prequeue;

                        /* __ Set realtime policy in scheduler __ */
                }

                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
                        release_sock(sk);
                        lock_sock(sk);
                } else
                        sk_wait_data(sk, &timeo);

#ifdef CONFIG_NET_DMA
                tp->ucopy.wakeup = 0;
#endif

                if (user_recv) {
                        int chunk;

                        /* __ Restore normal policy in scheduler __ */

                        if ((chunk = len - tp->ucopy.len) != 0) {
                                NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
                                len -= chunk;
                                copied += chunk;
                        }

                        if (tp->rcv_nxt == tp->copied_seq &&
                            !skb_queue_empty(&tp->ucopy.prequeue)) {
do_prequeue:
                                tcp_prequeue_process(sk);

                                if ((chunk = len - tp->ucopy.len) != 0) {
                                        NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                        len -= chunk;
                                        copied += chunk;
                                }
                        }
                }
                if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
                        if (net_ratelimit())
                                printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
                                       current->comm, current->pid);
                        peek_seq = tp->copied_seq;
                }
                continue;

        found_ok_skb:
                /* Ok so how much can we use? */
                used = skb->len - offset;
                if (len < used)
                        used = len;

                /* Do we have urgent data here? */
                if (tp->urg_data) {
                        u32 urg_offset = tp->urg_seq - *seq;
                        if (urg_offset < used) {
                                if (!urg_offset) {
                                        if (!sock_flag(sk, SOCK_URGINLINE)) {
                                                ++*seq;
                                                offset++;
                                                used--;
                                                if (!used)
                                                        goto skip_copy;
                                        }
                                } else
                                        used = urg_offset;
                        }
                }

                if (!(flags & MSG_TRUNC)) {
#ifdef CONFIG_NET_DMA
                        if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
                                tp->ucopy.dma_chan = get_softnet_dma();

                        if (tp->ucopy.dma_chan) {
                                tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
                                        tp->ucopy.dma_chan, skb, offset,
                                        msg->msg_iov, used,
                                        tp->ucopy.pinned_list);

                                if (tp->ucopy.dma_cookie < 0) {

                                        printk(KERN_ALERT "dma_cookie < 0\n");

                                        /* Exception. Bailout! */
                                        if (!copied)
                                                copied = -EFAULT;
                                        break;
                                }
                                if ((offset + used) == skb->len)
                                        copied_early = 1;

                        } else
#endif
                        {
                                err = skb_copy_datagram_iovec(skb, offset,
                                                msg->msg_iov, used);
                                if (err) {
                                        /* Exception. Bailout! */
                                        if (!copied)
                                                copied = -EFAULT;
                                        break;
                                }
                        }
                }

                *seq += used;
                copied += used;
                len -= used;

                tcp_rcv_space_adjust(sk);

skip_copy:
                if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
                        tp->urg_data = 0;
                        tcp_fast_path_check(sk, tp);
                }
                if (used + offset < skb->len)
                        continue;

                if (skb->h.th->fin)
                        goto found_fin_ok;
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, copied_early);
                        copied_early = 0;
                }
                continue;

        found_fin_ok:
                /* Process the FIN. */
                ++*seq;
                if (!(flags & MSG_PEEK)) {
                        sk_eat_skb(sk, skb, copied_early);
                        copied_early = 0;
                }
                break;
        } while (len > 0);

        if (user_recv) {
                if (!skb_queue_empty(&tp->ucopy.prequeue)) {
                        int chunk;

                        tp->ucopy.len = copied > 0 ? len : 0;

                        tcp_prequeue_process(sk);

                        if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
                                NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
                                len -= chunk;
                                copied += chunk;
                        }
                }

                tp->ucopy.task = NULL;
                tp->ucopy.len = 0;
        }

#ifdef CONFIG_NET_DMA
        if (tp->ucopy.dma_chan) {
                struct sk_buff *skb;
                dma_cookie_t done, used;

                dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

                while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
                                                 tp->ucopy.dma_cookie, &done,
                                                 &used) == DMA_IN_PROGRESS) {
                        /* do partial cleanup of sk_async_wait_queue */
                        while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
                               (dma_async_is_complete(skb->dma_cookie, done,
                                                      used) == DMA_SUCCESS)) {
                                __skb_dequeue(&sk->sk_async_wait_queue);
                                kfree_skb(skb);
                        }
                }

                /* Safe to free early-copied skbs now */
                __skb_queue_purge(&sk->sk_async_wait_queue);
                dma_chan_put(tp->ucopy.dma_chan);
                tp->ucopy.dma_chan = NULL;
        }
        if (tp->ucopy.pinned_list) {
                dma_unpin_iovec_pages(tp->ucopy.pinned_list);
                tp->ucopy.pinned_list = NULL;
        }
#endif

        /* According to UNIX98, msg_name/msg_namelen are ignored
         * on connected socket. I was just happy when found this 8) --ANK
         */

        /* Clean up data we have read: This will do ACK frames. */
        tcp_cleanup_rbuf(sk, copied);

        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return copied;

out:
        TCP_CHECK_TIMER(sk);
        release_sock(sk);
        return err;

recv_urg:
        err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
        goto out;
}

/*
 *      State processing on a close. This implements the state shift for
 *      sending our FIN frame. Note that we only send a FIN for some
 *      states. A shutdown() may have already sent the FIN, or we may be
 *      closed.
 */

static const unsigned char new_state[16] = {
  /* current state:     new state:      action:     */
  /* (Invalid)       */ TCP_CLOSE,
  /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_SYN_SENT    */ TCP_CLOSE,
  /* TCP_SYN_RECV    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
  /* TCP_FIN_WAIT1   */ TCP_FIN_WAIT1,
  /* TCP_FIN_WAIT2   */ TCP_FIN_WAIT2,
  /* TCP_TIME_WAIT   */ TCP_CLOSE,
  /* TCP_CLOSE       */ TCP_CLOSE,
  /* TCP_CLOSE_WAIT  */ TCP_LAST_ACK | TCP_ACTION_FIN,
  /* TCP_LAST_ACK    */ TCP_LAST_ACK,
  /* TCP_LISTEN      */ TCP_CLOSE,
  /* TCP_CLOSING     */ TCP_CLOSING,
};

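/* Move the socket to its on-close successor state per the table above.
 * The return value tells the caller whether a FIN still has to be sent.
 */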
static int tcp_close_state(struct sock *sk)
{
        int next = (int)new_state[sk->sk_state];
        int ns = next & TCP_STATE_MASK;

        tcp_set_state(sk, ns);

        return next & TCP_ACTION_FIN;
}

/*
 *      Shutdown the sending side of a connection. Much like close except
 *      that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
 */

void tcp_shutdown(struct sock *sk, int how)
{
        /*      We need to grab some memory, and put together a FIN,
         *      and then put it into the queue to be sent.
         *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
         */
        if (!(how & SEND_SHUTDOWN))
                return;

        /* If we've already sent a FIN, or it's a closed state, skip this. */
        if ((1 << sk->sk_state) &
            (TCPF_ESTABLISHED | TCPF_SYN_SENT |
             TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
                /* Clear out any half completed packets.  FIN if needed. */
                if (tcp_close_state(sk))
                        tcp_send_fin(sk);
        }
}

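/* Full close: flush unread receive data (answering with RST if any was
 * discarded), walk the FIN state machine as needed, then orphan the
 * socket and let the protocol finish in the background, subject to the
 * FIN_WAIT2/orphan resource limits below.
 */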
void tcp_close(struct sock *sk, long timeout)
{
        struct sk_buff *skb;
        int data_was_unread = 0;
        int state;

        lock_sock(sk);
        sk->sk_shutdown = SHUTDOWN_MASK;

        if (sk->sk_state == TCP_LISTEN) {
                tcp_set_state(sk, TCP_CLOSE);

                /* Special case. */
                inet_csk_listen_stop(sk);

                goto adjudge_to_death;
        }

        /*  We need to flush the recv. buffs.  We do this only on the
         *  descriptor close, not protocol-sourced closes, because the
         *  reader process may not have drained the data yet!
         */
        while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
                u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
                          skb->h.th->fin;
                data_was_unread += len;
                __kfree_skb(skb);
        }

        sk_stream_mem_reclaim(sk);

        /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
         * 3.10, we send a RST here because data was lost.  To
         * witness the awful effects of the old behavior of always
         * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
         * a bulk GET in an FTP client, suspend the process, wait
         * for the client to advertise a zero window, then kill -9
         * the FTP client, wheee...  Note: timeout is always zero
         * in such a case.
         */
        if (data_was_unread) {
                /* Unread data was tossed, zap the connection. */
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
                tcp_set_state(sk, TCP_CLOSE);
                tcp_send_active_reset(sk, GFP_KERNEL);
        } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
                /* Check zero linger _after_ checking for unread data. */
                sk->sk_prot->disconnect(sk, 0);
                NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
        } else if (tcp_close_state(sk)) {
                /* We FIN if the application ate all the data before
                 * zapping the connection.
                 */

                /* RED-PEN. Formally speaking, we have broken TCP state
                 * machine. State transitions:
                 *
                 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
                 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
                 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
                 *
                 * are legal only when FIN has been sent (i.e. in window),
                 * rather than queued out of window. Purists blame.
                 *
                 * F.e. "RFC state" is ESTABLISHED,
                 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
                 *
                 * The visible declinations are that sometimes
                 * we enter time-wait state, when it is not required really
                 * (harmless), do not send active resets, when they are
                 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
                 * they look as CLOSING or LAST_ACK for Linux)
                 * Probably, I missed some more holelets.
                 *                                              --ANK
                 */
                tcp_send_fin(sk);
        }

        sk_stream_wait_close(sk, timeout);

adjudge_to_death:
        state = sk->sk_state;
        sock_hold(sk);
        sock_orphan(sk);
        atomic_inc(sk->sk_prot->orphan_count);

        /* It is the last release_sock in its life. It will remove backlog. */
        release_sock(sk);


        /* Now socket is owned by kernel and we acquire BH lock
           to finish close. No need to check for user refs.
         */
        local_bh_disable();
        bh_lock_sock(sk);
        BUG_TRAP(!sock_owned_by_user(sk));

        /* Have we already been destroyed by a softirq or backlog? */
        if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
                goto out;

        /*      This is a (useful) BSD violation of the RFC. There is a
         *      problem with TCP as specified in that the other end could
         *      keep a socket open forever with no application left at this end.
1646 * We use a 3 minute timeout (about the same as BSD) then kill
1647 * our end. If they send after that then tough - BUT: long enough
1648 * that we won't make the old 4*rto = almost no time - whoops
1649 * reset mistake.
1650 *
1651 * Nope, it was not mistake. It is really desired behaviour
1652 * f.e. on http servers, when such sockets are useless, but
1653 * consume significant resources. Let's do it with special
1654 * linger2 option. --ANK
1655 */

	if (sk->sk_state == TCP_FIN_WAIT2) {
		struct tcp_sock *tp = tcp_sk(sk);
		if (tp->linger2 < 0) {
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
		} else {
			const int tmo = tcp_fin_time(sk);

			if (tmo > TCP_TIMEWAIT_LEN) {
				inet_csk_reset_keepalive_timer(sk,
						tmo - TCP_TIMEWAIT_LEN);
			} else {
				tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
				goto out;
			}
		}
	}
	if (sk->sk_state != TCP_CLOSE) {
		sk_stream_mem_reclaim(sk);
		if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
			if (net_ratelimit())
				printk(KERN_INFO "TCP: too many orphaned "
				       "sockets\n");
			tcp_set_state(sk, TCP_CLOSE);
			tcp_send_active_reset(sk, GFP_ATOMIC);
			NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
		}
	}

	if (sk->sk_state == TCP_CLOSE)
		inet_csk_destroy_sock(sk);
	/* Otherwise, socket is reprieved until protocol close. */

out:
	bh_unlock_sock(sk);
	local_bh_enable();
	sock_put(sk);
}
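
/* Usage sketch (userspace, illustrative only): the FIN-WAIT-2 lingering
 * above is controlled per-socket by TCP_LINGER2. Assuming a connected
 * TCP socket fd:
 *
 *	int fin_timeout = 10;	(seconds to keep FIN-WAIT-2 around)
 *	setsockopt(fd, SOL_TCP, TCP_LINGER2, &fin_timeout,
 *		   sizeof(fin_timeout));
 *
 * A negative value makes tp->linger2 negative, so tcp_close() sends an
 * active reset instead of waiting; see the tp->linger2 < 0 branch above.
 */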

/* These states need RST on ABORT according to RFC793 */

static inline int tcp_need_reset(int state)
{
	return (1 << state) &
	       (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
		TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
}

int tcp_disconnect(struct sock *sk, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int err = 0;
	int old_state = sk->sk_state;

	if (old_state != TCP_CLOSE)
		tcp_set_state(sk, TCP_CLOSE);

	/* ABORT function of RFC793 */
	if (old_state == TCP_LISTEN) {
		inet_csk_listen_stop(sk);
	} else if (tcp_need_reset(old_state) ||
		   (tp->snd_nxt != tp->write_seq &&
		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
		/* The last check adjusts for the discrepancy between Linux
		 * and RFC states.
		 */
		tcp_send_active_reset(sk, gfp_any());
		sk->sk_err = ECONNRESET;
	} else if (old_state == TCP_SYN_SENT)
		sk->sk_err = ECONNRESET;

	tcp_clear_xmit_timers(sk);
	__skb_queue_purge(&sk->sk_receive_queue);
	sk_stream_writequeue_purge(sk);
	__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_NET_DMA
	__skb_queue_purge(&sk->sk_async_wait_queue);
#endif

	inet->dport = 0;

	if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
		inet_reset_saddr(sk);

	sk->sk_shutdown = 0;
	sock_reset_flag(sk, SOCK_DONE);
	tp->srtt = 0;
	if ((tp->write_seq += tp->max_window + 2) == 0)
		tp->write_seq = 1;
	icsk->icsk_backoff = 0;
	tp->snd_cwnd = 2;
	icsk->icsk_probes_out = 0;
	tp->packets_out = 0;
	tp->snd_ssthresh = 0x7fffffff;
	tp->snd_cwnd_cnt = 0;
	tp->bytes_acked = 0;
	tcp_set_ca_state(sk, TCP_CA_Open);
	tcp_clear_retrans(tp);
	inet_csk_delack_init(sk);
	sk->sk_send_head = NULL;
	tp->rx_opt.saw_tstamp = 0;
	tcp_sack_reset(&tp->rx_opt);
	__sk_dst_reset(sk);

	BUG_TRAP(!inet->num || icsk->icsk_bind_hash);

	sk->sk_error_report(sk);
	return err;
}
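
/* Usage sketch (userspace, illustrative only): tcp_disconnect() is what
 * runs when an application dissolves an association by calling connect()
 * with an AF_UNSPEC address on a TCP socket:
 *
 *	struct sockaddr sa = { .sa_family = AF_UNSPEC };
 *	connect(fd, &sa, sizeof(sa));
 *
 * The socket returns to TCP_CLOSE with its queues purged and may then be
 * reused for a fresh connect().
 */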

/*
 *	Socket option code for TCP.
 */
static int do_tcp_setsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int optlen)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int val;
	int err = 0;

	/* This is a string value; all the others are ints. */
	if (optname == TCP_CONGESTION) {
		char name[TCP_CA_NAME_MAX];

		if (optlen < 1)
			return -EINVAL;

		val = strncpy_from_user(name, optval,
					min(TCP_CA_NAME_MAX-1, optlen));
		if (val < 0)
			return -EFAULT;
		name[val] = 0;

		lock_sock(sk);
		err = tcp_set_congestion_control(sk, name);
		release_sock(sk);
		return err;
	}

	if (optlen < sizeof(int))
		return -EINVAL;

	if (get_user(val, (int __user *)optval))
		return -EFAULT;

	lock_sock(sk);

	switch (optname) {
	case TCP_MAXSEG:
		/* Values greater than the interface MTU won't take effect.
		 * However, at the point when this call is made we typically
		 * don't yet know which interface is going to be used.
		 */
		if (val < 8 || val > MAX_TCP_WINDOW) {
			err = -EINVAL;
			break;
		}
		tp->rx_opt.user_mss = val;
		break;

	case TCP_NODELAY:
		if (val) {
			/* TCP_NODELAY is weaker than TCP_CORK, so that
			 * this option set on a corked socket is remembered,
			 * but it is not activated until the cork is cleared.
			 *
			 * However, when TCP_NODELAY is set we make
			 * an explicit push, which overrides even TCP_CORK
			 * for currently queued segments.
			 */
			tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		} else {
			tp->nonagle &= ~TCP_NAGLE_OFF;
		}
		break;

	case TCP_CORK:
		/* When set, this indicates that all non-full frames should
		 * be queued. Later the user clears this option and we
		 * transmit any pending partial frames in the queue. This is
		 * meant to be used alongside sendfile() to get properly
		 * filled frames when the user (for example) must write out
		 * headers with a write() call first and then use sendfile()
		 * to send out the data parts.
		 *
		 * TCP_CORK can be set together with TCP_NODELAY and it is
		 * stronger than TCP_NODELAY.
		 */
		if (val) {
			tp->nonagle |= TCP_NAGLE_CORK;
		} else {
			tp->nonagle &= ~TCP_NAGLE_CORK;
			if (tp->nonagle & TCP_NAGLE_OFF)
				tp->nonagle |= TCP_NAGLE_PUSH;
			tcp_push_pending_frames(sk, tp);
		}
		break;

	case TCP_KEEPIDLE:
		if (val < 1 || val > MAX_TCP_KEEPIDLE)
			err = -EINVAL;
		else {
			tp->keepalive_time = val * HZ;
			if (sock_flag(sk, SOCK_KEEPOPEN) &&
			    !((1 << sk->sk_state) &
			      (TCPF_CLOSE | TCPF_LISTEN))) {
				__u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
				if (tp->keepalive_time > elapsed)
					elapsed = tp->keepalive_time - elapsed;
				else
					elapsed = 0;
				inet_csk_reset_keepalive_timer(sk, elapsed);
			}
		}
		break;
	case TCP_KEEPINTVL:
		if (val < 1 || val > MAX_TCP_KEEPINTVL)
			err = -EINVAL;
		else
			tp->keepalive_intvl = val * HZ;
		break;
	case TCP_KEEPCNT:
		if (val < 1 || val > MAX_TCP_KEEPCNT)
			err = -EINVAL;
		else
			tp->keepalive_probes = val;
		break;
	case TCP_SYNCNT:
		if (val < 1 || val > MAX_TCP_SYNCNT)
			err = -EINVAL;
		else
			icsk->icsk_syn_retries = val;
		break;

	case TCP_LINGER2:
		if (val < 0)
			tp->linger2 = -1;
		else if (val > sysctl_tcp_fin_timeout / HZ)
			tp->linger2 = 0;
		else
			tp->linger2 = val * HZ;
		break;

	case TCP_DEFER_ACCEPT:
		icsk->icsk_accept_queue.rskq_defer_accept = 0;
		if (val > 0) {
			/* Translate the value in seconds into a number
			 * of retransmits.
			 */
			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
			       val > ((TCP_TIMEOUT_INIT / HZ) <<
				      icsk->icsk_accept_queue.rskq_defer_accept))
				icsk->icsk_accept_queue.rskq_defer_accept++;
			icsk->icsk_accept_queue.rskq_defer_accept++;
		}
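		/* E.g. with TCP_TIMEOUT_INIT / HZ == 3, val == 10 walks the
		 * loop twice (10 > 3, 10 > 6) and the final increment leaves
		 * rskq_defer_accept == 3, i.e. enough SYN-ACK retransmission
		 * periods to cover roughly the requested 10 seconds.
		 */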
		break;

	case TCP_WINDOW_CLAMP:
		if (!val) {
			if (sk->sk_state != TCP_CLOSE) {
				err = -EINVAL;
				break;
			}
			tp->window_clamp = 0;
		} else
			tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
					   SOCK_MIN_RCVBUF / 2 : val;
		break;

	case TCP_QUICKACK:
		if (!val) {
			icsk->icsk_ack.pingpong = 1;
		} else {
			icsk->icsk_ack.pingpong = 0;
			if ((1 << sk->sk_state) &
			    (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
			    inet_csk_ack_scheduled(sk)) {
				icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
				tcp_cleanup_rbuf(sk, 1);
				if (!(val & 1))
					icsk->icsk_ack.pingpong = 1;
			}
		}
		break;

#ifdef CONFIG_TCP_MD5SIG
	case TCP_MD5SIG:
		/* Read the IP->Key mappings from userspace */
		err = tp->af_specific->md5_parse(sk, optval, optlen);
		break;
#endif

	default:
		err = -ENOPROTOOPT;
		break;
	}
	release_sock(sk);
	return err;
}
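
/* Usage sketch (userspace, illustrative only) of the TCP_CORK pattern the
 * comment above describes: cork, write the headers, sendfile() the body,
 * then uncork so the final partial frame is pushed. Error handling omitted.
 *
 *	int on = 1, off = 0;
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &on, sizeof(on));
 *	write(fd, headers, header_len);
 *	sendfile(fd, file_fd, NULL, file_len);
 *	setsockopt(fd, SOL_TCP, TCP_CORK, &off, sizeof(off));
 */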

int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->setsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_setsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_setsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_setsockopt);
#endif


/* Return information about the state of a TCP endpoint in API format. */
void tcp_get_info(struct sock *sk, struct tcp_info *info)
{
	struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	u32 now = tcp_time_stamp;

	memset(info, 0, sizeof(*info));

	info->tcpi_state = sk->sk_state;
	info->tcpi_ca_state = icsk->icsk_ca_state;
	info->tcpi_retransmits = icsk->icsk_retransmits;
	info->tcpi_probes = icsk->icsk_probes_out;
	info->tcpi_backoff = icsk->icsk_backoff;

	if (tp->rx_opt.tstamp_ok)
		info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
	if (tp->rx_opt.sack_ok)
		info->tcpi_options |= TCPI_OPT_SACK;
	if (tp->rx_opt.wscale_ok) {
		info->tcpi_options |= TCPI_OPT_WSCALE;
		info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
		info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
	}

	if (tp->ecn_flags & TCP_ECN_OK)
		info->tcpi_options |= TCPI_OPT_ECN;

	info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
	info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
	info->tcpi_snd_mss = tp->mss_cache;
	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;

	info->tcpi_unacked = tp->packets_out;
	info->tcpi_sacked = tp->sacked_out;
	info->tcpi_lost = tp->lost_out;
	info->tcpi_retrans = tp->retrans_out;
	info->tcpi_fackets = tp->fackets_out;

	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);

	info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
	info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
	info->tcpi_rtt = jiffies_to_usecs(tp->srtt) >> 3;
	info->tcpi_rttvar = jiffies_to_usecs(tp->mdev) >> 2;
	info->tcpi_snd_ssthresh = tp->snd_ssthresh;
	info->tcpi_snd_cwnd = tp->snd_cwnd;
	info->tcpi_advmss = tp->advmss;
	info->tcpi_reordering = tp->reordering;

	info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt) >> 3;
	info->tcpi_rcv_space = tp->rcvq_space.space;

	info->tcpi_total_retrans = tp->total_retrans;
}

EXPORT_SYMBOL_GPL(tcp_get_info);
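
/* Usage sketch (userspace, illustrative only): the struct filled in above
 * is what getsockopt(TCP_INFO) copies out, e.g. to read the smoothed RTT
 * estimate in microseconds and the current congestion window:
 *
 *	struct tcp_info ti;
 *	socklen_t len = sizeof(ti);
 *	if (getsockopt(fd, SOL_TCP, TCP_INFO, &ti, &len) == 0)
 *		printf("rtt %u us, cwnd %u\n", ti.tcpi_rtt, ti.tcpi_snd_cwnd);
 */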

static int do_tcp_getsockopt(struct sock *sk, int level,
			     int optname, char __user *optval, int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	int val, len;

	if (get_user(len, optlen))
		return -EFAULT;

	len = min_t(unsigned int, len, sizeof(int));

	if (len < 0)
		return -EINVAL;

	switch (optname) {
	case TCP_MAXSEG:
		val = tp->mss_cache;
		if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
			val = tp->rx_opt.user_mss;
		break;
	case TCP_NODELAY:
		val = !!(tp->nonagle & TCP_NAGLE_OFF);
		break;
	case TCP_CORK:
		val = !!(tp->nonagle & TCP_NAGLE_CORK);
		break;
	case TCP_KEEPIDLE:
		val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
		break;
	case TCP_KEEPINTVL:
		val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
		break;
	case TCP_KEEPCNT:
		val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
		break;
	case TCP_SYNCNT:
		val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
		break;
	case TCP_LINGER2:
		val = tp->linger2;
		if (val >= 0)
			val = (val ? : sysctl_tcp_fin_timeout) / HZ;
		break;
	case TCP_DEFER_ACCEPT:
		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
		break;
	case TCP_WINDOW_CLAMP:
		val = tp->window_clamp;
		break;
	case TCP_INFO: {
		struct tcp_info info;

		if (get_user(len, optlen))
			return -EFAULT;

		tcp_get_info(sk, &info);

		len = min_t(unsigned int, len, sizeof(info));
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, &info, len))
			return -EFAULT;
		return 0;
	}
	case TCP_QUICKACK:
		val = !icsk->icsk_ack.pingpong;
		break;

	case TCP_CONGESTION:
		if (get_user(len, optlen))
			return -EFAULT;
		len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
		if (put_user(len, optlen))
			return -EFAULT;
		if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
			return -EFAULT;
		return 0;
	default:
		return -ENOPROTOOPT;
	}

	if (put_user(len, optlen))
		return -EFAULT;
	if (copy_to_user(optval, &val, len))
		return -EFAULT;
	return 0;
}
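
/* Usage sketch (userspace, illustrative only): TCP_CONGESTION is the one
 * string-valued option handled above; the algorithm name can be read back
 * or changed per socket, provided the algorithm is available:
 *
 *	char name[16];		(TCP_CA_NAME_MAX)
 *	socklen_t len = sizeof(name);
 *	getsockopt(fd, SOL_TCP, TCP_CONGESTION, name, &len);
 *	setsockopt(fd, SOL_TCP, TCP_CONGESTION, "reno", 4);
 */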

int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
		   int __user *optlen)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (level != SOL_TCP)
		return icsk->icsk_af_ops->getsockopt(sk, level, optname,
						     optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

#ifdef CONFIG_COMPAT
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen)
{
	if (level != SOL_TCP)
		return inet_csk_compat_getsockopt(sk, level, optname,
						  optval, optlen);
	return do_tcp_getsockopt(sk, level, optname, optval, optlen);
}

EXPORT_SYMBOL(compat_tcp_getsockopt);
#endif

struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
{
	struct sk_buff *segs = ERR_PTR(-EINVAL);
	struct tcphdr *th;
	unsigned thlen;
	unsigned int seq;
	__be32 delta;
	unsigned int oldlen;
	unsigned int len;

	if (!pskb_may_pull(skb, sizeof(*th)))
		goto out;

	th = skb->h.th;
	thlen = th->doff * 4;
	if (thlen < sizeof(*th))
		goto out;

	if (!pskb_may_pull(skb, thlen))
		goto out;

	oldlen = (u16)~skb->len;
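	/* oldlen is the 16-bit ones-complement of the old total length.
	 * Adding it into a checksum cancels the old length's contribution,
	 * which is how the delta fixups below retarget the pseudo-header
	 * checksum from the big original packet to each short segment.
	 */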
	__skb_pull(skb, thlen);

	if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
		/* Packet is from an untrusted source, reset gso_segs. */
		int type = skb_shinfo(skb)->gso_type;
		int mss;

		if (unlikely(type &
			     ~(SKB_GSO_TCPV4 |
			       SKB_GSO_DODGY |
			       SKB_GSO_TCP_ECN |
			       SKB_GSO_TCPV6 |
			       0) ||
			     !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
			goto out;

		mss = skb_shinfo(skb)->gso_size;
		skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;

		segs = NULL;
		goto out;
	}

	segs = skb_segment(skb, features);
	if (IS_ERR(segs))
		goto out;

	len = skb_shinfo(skb)->gso_size;
	delta = htonl(oldlen + (thlen + len));

	skb = segs;
	th = skb->h.th;
	seq = ntohl(th->seq);

	do {
		th->fin = th->psh = 0;

		th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				       (__force u32)delta));
		if (skb->ip_summed != CHECKSUM_PARTIAL)
			th->check = csum_fold(csum_partial(skb->h.raw, thlen,
							   skb->csum));

		seq += len;
		skb = skb->next;
		th = skb->h.th;

		th->seq = htonl(seq);
		th->cwr = 0;
	} while (skb->next);

	delta = htonl(oldlen + (skb->tail - skb->h.raw) + skb->data_len);
	th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
				(__force u32)delta));
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		th->check = csum_fold(csum_partial(skb->h.raw, thlen,
						   skb->csum));

out:
	return segs;
}
EXPORT_SYMBOL(tcp_tso_segment);
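
/* Worked example of the checksum fixup above, with assumed illustrative
 * numbers: a 20-byte header plus 2920 bytes of payload (skb->len == 2940,
 * split into two 1460-byte segments) gives oldlen == (u16)~2940 and, for
 * each segment, delta == oldlen + (20 + 1460). In ones-complement
 * arithmetic that is (1480 - 2940), exactly the change in the
 * pseudo-header's TCP-length field, so adding delta to th->check
 * revalidates the checksum of each new segment.
 */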

#ifdef CONFIG_TCP_MD5SIG
static unsigned long tcp_md5sig_users;
static struct tcp_md5sig_pool **tcp_md5sig_pool;
static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);

static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
{
	int cpu;
	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
		if (p) {
			if (p->md5_desc.tfm)
				crypto_free_hash(p->md5_desc.tfm);
			kfree(p);
			p = NULL;
		}
	}
	free_percpu(pool);
}

void tcp_free_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool = NULL;

	spin_lock(&tcp_md5sig_pool_lock);
	if (--tcp_md5sig_users == 0) {
		pool = tcp_md5sig_pool;
		tcp_md5sig_pool = NULL;
	}
	spin_unlock(&tcp_md5sig_pool_lock);
	if (pool)
		__tcp_free_md5sig_pool(pool);
}

EXPORT_SYMBOL(tcp_free_md5sig_pool);

static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
{
	int cpu;
	struct tcp_md5sig_pool **pool;

	pool = alloc_percpu(struct tcp_md5sig_pool *);
	if (!pool)
		return NULL;

	for_each_possible_cpu(cpu) {
		struct tcp_md5sig_pool *p;
		struct crypto_hash *hash;

		p = kzalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			goto out_free;
		*per_cpu_ptr(pool, cpu) = p;

		hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
		if (!hash || IS_ERR(hash))
			goto out_free;

		p->md5_desc.tfm = hash;
	}
	return pool;
out_free:
	__tcp_free_md5sig_pool(pool);
	return NULL;
}

struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
{
	struct tcp_md5sig_pool **pool;
	int alloc = 0;

retry:
	spin_lock(&tcp_md5sig_pool_lock);
	pool = tcp_md5sig_pool;
	if (tcp_md5sig_users++ == 0) {
		alloc = 1;
		spin_unlock(&tcp_md5sig_pool_lock);
	} else if (!pool) {
		tcp_md5sig_users--;
		spin_unlock(&tcp_md5sig_pool_lock);
		cpu_relax();
		goto retry;
	} else
		spin_unlock(&tcp_md5sig_pool_lock);

	if (alloc) {
		/* We cannot hold the spinlock here because this may sleep. */
		struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
		spin_lock(&tcp_md5sig_pool_lock);
		if (!p) {
			tcp_md5sig_users--;
			spin_unlock(&tcp_md5sig_pool_lock);
			return NULL;
		}
		pool = tcp_md5sig_pool;
		if (pool) {
			/* Oops, it has already been assigned. */
			spin_unlock(&tcp_md5sig_pool_lock);
			__tcp_free_md5sig_pool(p);
		} else {
			tcp_md5sig_pool = pool = p;
			spin_unlock(&tcp_md5sig_pool_lock);
		}
	}
	return pool;
}

EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
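
/* Usage pattern (a sketch, not a definitive API contract): a caller that
 * installs an MD5 key takes a reference with tcp_alloc_md5sig_pool();
 * signing paths then borrow the per-CPU state while preemption is
 * disabled, e.g.
 *
 *	struct tcp_md5sig_pool *hp = __tcp_get_md5sig_pool(smp_processor_id());
 *	... compute the signature via hp->md5_desc ...
 *	__tcp_put_md5sig_pool();
 *
 * and the pool itself disappears once the last key drops its reference.
 */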

struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
{
	struct tcp_md5sig_pool **p;
	spin_lock(&tcp_md5sig_pool_lock);
	p = tcp_md5sig_pool;
	if (p)
		tcp_md5sig_users++;
	spin_unlock(&tcp_md5sig_pool_lock);
	return (p ? *per_cpu_ptr(p, cpu) : NULL);
}

EXPORT_SYMBOL(__tcp_get_md5sig_pool);

void __tcp_put_md5sig_pool(void)
{
	/* Drop the reference taken in __tcp_get_md5sig_pool(); the pool is
	 * actually torn down only when the last user is gone. Calling
	 * __tcp_free_md5sig_pool() directly here would free the per-CPU
	 * state out from under the remaining users.
	 */
	tcp_free_md5sig_pool();
}

EXPORT_SYMBOL(__tcp_put_md5sig_pool);
#endif

extern void __skb_cb_too_small_for_tcp(int, int);
extern struct tcp_congestion_ops tcp_reno;

static __initdata unsigned long thash_entries;
static int __init set_thash_entries(char *str)
{
	if (!str)
		return 0;
	thash_entries = simple_strtoul(str, &str, 0);
	return 1;
}
__setup("thash_entries=", set_thash_entries);
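
/* E.g. booting with "thash_entries=16384" requests 16384 entries for the
 * established-connection hash instead of the memory-scaled default chosen
 * in tcp_init() below.
 */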

void __init tcp_init(void)
{
	struct sk_buff *skb = NULL;
	unsigned long limit;
	int order, i, max_share;

	if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
		__skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
					   sizeof(skb->cb));

	tcp_hashinfo.bind_bucket_cachep =
		kmem_cache_create("tcp_bind_bucket",
				  sizeof(struct inet_bind_bucket), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* Size and allocate the main established and bind bucket
	 * hash tables.
	 *
	 * The methodology is similar to that of the buffer cache.
	 */
	tcp_hashinfo.ehash =
		alloc_large_system_hash("TCP established",
					sizeof(struct inet_ehash_bucket),
					thash_entries,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.ehash_size,
					NULL,
					0);
	tcp_hashinfo.ehash_size = (1 << tcp_hashinfo.ehash_size) >> 1;
	for (i = 0; i < (tcp_hashinfo.ehash_size << 1); i++) {
		rwlock_init(&tcp_hashinfo.ehash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
	}

	tcp_hashinfo.bhash =
		alloc_large_system_hash("TCP bind",
					sizeof(struct inet_bind_hashbucket),
					tcp_hashinfo.ehash_size,
					(num_physpages >= 128 * 1024) ?
					13 : 15,
					0,
					&tcp_hashinfo.bhash_size,
					NULL,
					64 * 1024);
	tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
	for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
		spin_lock_init(&tcp_hashinfo.bhash[i].lock);
		INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
	}

	/* Try to be a bit smarter and adjust defaults depending
	 * on available memory.
	 */
	for (order = 0; ((1 << order) << PAGE_SHIFT) <
			(tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
	     order++)
		;
	if (order >= 4) {
		sysctl_local_port_range[0] = 32768;
		sysctl_local_port_range[1] = 61000;
		tcp_death_row.sysctl_max_tw_buckets = 180000;
		sysctl_tcp_max_orphans = 4096 << (order - 4);
		sysctl_max_syn_backlog = 1024;
	} else if (order < 3) {
		sysctl_local_port_range[0] = 1024 * (3 - order);
		tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
		sysctl_tcp_max_orphans >>= (3 - order);
		sysctl_max_syn_backlog = 128;
	}

	/* Allow no more than 3/4 of kernel memory (usually less) to be
	 * allocated to TCP.
	 */
	sysctl_tcp_mem[0] = (1536 / sizeof(struct inet_bind_hashbucket)) << order;
	sysctl_tcp_mem[1] = sysctl_tcp_mem[0] * 4 / 3;
	sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;

	limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
	max_share = min(4UL*1024*1024, limit);
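
	/* A reading of the two expressions above (not new policy):
	 * tcp_mem[1] is in pages, so "<< PAGE_SHIFT >> 7" yields the
	 * memory-pressure threshold in bytes divided by 128; per-socket
	 * buffer maxima are then capped at 4 MB or 1/128 of that memory,
	 * whichever is smaller.
	 */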

	sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_wmem[1] = 16*1024;
	sysctl_tcp_wmem[2] = max(64*1024, max_share);

	sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
	sysctl_tcp_rmem[1] = 87380;
	sysctl_tcp_rmem[2] = max(87380, max_share);

	printk(KERN_INFO "TCP: Hash tables configured "
	       "(established %d bind %d)\n",
	       tcp_hashinfo.ehash_size << 1, tcp_hashinfo.bhash_size);

	tcp_register_congestion_control(&tcp_reno);
}

EXPORT_SYMBOL(tcp_close);
EXPORT_SYMBOL(tcp_disconnect);
EXPORT_SYMBOL(tcp_getsockopt);
EXPORT_SYMBOL(tcp_ioctl);
EXPORT_SYMBOL(tcp_poll);
EXPORT_SYMBOL(tcp_read_sock);
EXPORT_SYMBOL(tcp_recvmsg);
EXPORT_SYMBOL(tcp_sendmsg);
EXPORT_SYMBOL(tcp_sendpage);
EXPORT_SYMBOL(tcp_setsockopt);
EXPORT_SYMBOL(tcp_shutdown);
EXPORT_SYMBOL(tcp_statistics);