/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/dmaengine.h>
#include <linux/crypto.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER	(128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS		512

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16U

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400

#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	 6	/* This is how many retries are done
				 * when active opening a connection.
				 * RFC1122 says the minimum retry MUST
				 * be at least 180secs. Nevertheless
				 * this value corresponds to
				 * 63secs of retransmission with the
				 * current initial RTO.
				 */

#define TCP_SYNACK_RETRIES 5	/* This is how many retries are done
				 * when passive opening a connection.
				 * This corresponds to 31secs of
				 * retransmission with the current
				 * initial RTO.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */

#define TCP_DELACK_MAX	((unsigned)(HZ/5))	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	((unsigned)(HZ/25))	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN	4U
#define TCP_ATO_MIN	4U
#endif
#define TCP_RTO_MAX	((unsigned)(120*HZ))
#define TCP_RTO_MIN	((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))	/* RFC6298 2.1 initial RTO value	*/
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ))	/* RFC 1122 initial RTO value, now
						 * used as a fallback RTO for the
						 * initial data transmission if no
						 * valid RTT sample has been acquired,
						 * most likely due to retrans in 3WHS.
						 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
							 * for local resources.
							 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG		19	/* MD5 Signature (RFC2385) */
#define TCPOPT_EXP		254	/* Experimental */
/* Magic number to be after the option value for sharing TCP
 * experimental options. See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC	0xF989

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10
#define TCPOLEN_MD5SIG		18
#define TCPOLEN_EXP_FASTOPEN_BASE  4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8
#define TCPOLEN_MD5SIG_ALIGNED		20
#define TCPOLEN_MSS_ALIGNED		4

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF		1	/* Nagle's algo is disabled */
#define TCP_NAGLE_CORK		2	/* Socket is corked	    */
#define TCP_NAGLE_PUSH		4	/* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6	/* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per draft-hkchu-tcpm-initcwnd-01 */
#define TCP_INIT_CWND		10

/* Bit Flags for sysctl_tcp_fastopen */
#define	TFO_CLIENT_ENABLE	1
#define	TFO_SERVER_ENABLE	2
#define	TFO_CLIENT_NO_COOKIE	4	/* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define	TFO_SERVER_COOKIE_NOT_REQD	0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option. SOCKOPT1/2 determine how to set max_qlen.
 */
#define	TFO_SERVER_WO_SOCKOPT1	0x400
#define	TFO_SERVER_WO_SOCKOPT2	0x800

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_mtu_probing;
extern int sysctl_tcp_base_mss;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern unsigned int sysctl_tcp_notsent_lowat;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_autocorking;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1) 	before(seq1, seq2)

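/* Worked example (illustrative, not from the original header): the cast
 * to __s32 after an unsigned subtraction keeps these comparisons correct
 * across sequence-number wraparound:
 *
 *	before(0xfffffff0, 0x00000010)
 *	  = (__s32)(0xfffffff0 - 0x00000010) < 0
 *	  = (__s32)0xffffffe0 < 0		-> true
 *
 * so a sequence number just below the 2^32 wrap point still compares as
 * "before" one just above it, provided the two values are within 2^31
 * of each other.
 */
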
/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

static inline bool tcp_out_of_memory(struct sock *sk)
{
	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
	    sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
		return true;
	return false;
}

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
	int orphans = percpu_counter_read_positive(ocp);

	/* Cheap, approximate per-cpu read first; fall back to the exact
	 * (expensive) sum only if the approximation is over the limit.
	 */
	if (orphans << shift > sysctl_tcp_max_orphans) {
		orphans = percpu_counter_sum_positive(ocp);
		if (orphans << shift > sysctl_tcp_max_orphans)
			return true;
	}
	return false;
}

bool tcp_check_oom(struct sock *sk, int shift);

/* syncookies: remember time of last synqueue overflow */
static inline void tcp_synq_overflow(struct sock *sk)
{
	tcp_sk(sk)->rx_opt.ts_recent_stamp = jiffies;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
	unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
	return time_after(jiffies, last_overflow + TCP_TIMEOUT_FALLBACK);
}

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_INC_STATS_BH(net, field)	SNMP_INC_STATS_BH((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)	SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
#define TCP_ADD_STATS(net, field, val)	SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
		 int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
			  const struct tcphdr *th, unsigned int len);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
			 const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
void tcp_cleanup_rbuf(struct sock *sk, int copied);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
					 const unsigned int pkts)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ack.quick) {
		if (pkts >= icsk->icsk_ack.quick) {
			icsk->icsk_ack.quick = 0;
			/* Leaving quickack mode we deflate ATO. */
			icsk->icsk_ack.ato   = TCP_ATO_MIN;
		} else
			icsk->icsk_ack.quick -= pkts;
	}
}

#define	TCP_ECN_OK		1
#define	TCP_ECN_QUEUE_CWR	2
#define	TCP_ECN_DEMAND_CWR	4
#define	TCP_ECN_SEEN		8

enum tcp_tw_status {
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};


enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
					      struct sk_buff *skb,
					      const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req, struct request_sock **prev,
			   bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
			bool paws_check, bool timestamps);
bool tcp_remember_stamp(struct sock *sk);
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
		      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
		   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
			  char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(struct sock *sk, struct request_sock *req);
int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t len, int nonblock, int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
		       struct tcp_options_received *opt_rx,
		       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *	TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb);
struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
				  struct request_sock *req,
				  struct dst_entry *dst);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,
				struct request_sock *req,
				struct tcp_fastopen_cookie *foc);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
			     struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value. A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE 2

static inline u32 tcp_cookie_time(void)
{
	u64 val = get_jiffies_64();

	do_div(val, 60 * HZ);
	return val;
}

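/* Worked example (illustrative, not from the original header): with
 * HZ == 1000 the divisor above is 60 * HZ == 60000 jiffies, so
 * tcp_cookie_time() advances once per minute; with MAX_SYNCOOKIE_AGE == 2
 * a cookie is accepted for at most two counter ticks, i.e. up to 120
 * seconds after it was minted.
 */
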
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
			      u16 *mssp);
__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
			      __u16 *mss);
#endif

__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_check_timestamp(struct tcp_options_received *opt, struct net *net,
			    bool *ecn_ok);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
		      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
			      const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
			      __u16 *mss);
#endif
/* tcp_output.c */

void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
			       int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *, struct sk_buff *);
int tcp_retransmit_skb(struct sock *, struct sk_buff *);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
bool tcp_syn_flood_action(struct sock *sk, const struct sk_buff *skb,
			  const char *proto);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);

/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_reset(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
	inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size with the half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
	int cutoff;

	/* When peer uses tiny windows, there is no use in packetizing
	 * to sub-MSS pieces for the sake of SWS or making sure there
	 * are enough packets in the pipe for fast recovery.
	 *
	 * On the other hand, for extremely large MSS devices, handling
	 * smaller than MSS windows in this way does make sense.
	 */
	if (tp->max_window >= 512)
		cutoff = (tp->max_window >> 1);
	else
		cutoff = tp->max_window;

	if (cutoff && pktsize > cutoff)
		return max_t(int, cutoff, 68U - tp->tcp_header_len);
	else
		return pktsize;
}

/* tcp.c */
void tcp_get_info(const struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
				unsigned int, size_t);
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
		  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
	if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
		inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
	return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}

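/* Note (illustrative, not from the original header): tp->srtt_us is kept
 * scaled by 8 by the RTT estimator in tcp_input.c, so srtt_us >> 3
 * recovers SRTT, and tp->rttvar_us already carries the variance
 * multiplier; the sum is therefore roughly the classic RFC 6298 form
 *
 *	RTO = SRTT + 4 * RTTVAR
 *
 * with tcp_bound_rto() above capping the result at TCP_RTO_MAX.
 */
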
static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

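/* Note (illustrative, not from the original header): pred_flags mirrors,
 * in network byte order, the 32-bit TCP header word holding data offset,
 * flags and window. tcp_header_len is in bytes, so tcp_header_len << 26
 * equals (tcp_header_len / 4) << 28, i.e. the doff field in the top four
 * bits of that word. The receive fast path can then validate an incoming
 * segment's header word against pred_flags with a single comparison.
 */
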
static inline void tcp_fast_path_check(struct sock *sk)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (skb_queue_empty(&tp->out_of_order_queue) &&
	    tp->rcv_wnd &&
	    atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
	    !tp->urg_data)
		tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
	const struct dst_entry *dst = __sk_dst_get(sk);
	u32 rto_min = TCP_RTO_MIN;

	if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
		rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
	return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
	return jiffies_to_usecs(tcp_rto_min(sk));
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

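/* Worked example (illustrative, not from the original header): if the
 * left edge at the last window advertisement was rcv_wup == 1000 with
 * rcv_wnd == 500, and we have since received up to rcv_nxt == 1200, the
 * remaining advertised space is 1000 + 500 - 1200 == 300 bytes. The
 * clamp to zero covers a peer that sent beyond the offered window.
 */
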
/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result. The caller does these things
 * if necessary. This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
	return skb->skb_mstamp.stamp_jiffies;
}


#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here too.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	union {
		/* Note : tcp_tw_isn is used in input path only
		 *	  (isn chosen by tcp_timewait_state_process())
		 *
		 *	  tcp_gso_segs is used in write queue only,
		 *	  cf tcp_skb_pcount()
		 */
		__u32		tcp_tw_isn;
		__u32		tcp_gso_segs;
	};
	__u8		tcp_flags;	/* TCP header flags. (tcp[13])	*/

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/
#define TCPCB_REPAIRED		0x10	/* SKB repaired (no skb_mstamp)	*/
#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
				TCPCB_REPAIRED)

	__u8		ip_dsfield;	/* IPv4 tos or IPv6 dsfield	*/
	/* 1 byte hole */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
	union {
		struct inet_skb_parm	h4;
#if IS_ENABLED(CONFIG_IPV6)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames */
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))

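/* Usage note (illustrative, not from the original header): the macro
 * reinterprets the generic skb->cb[] scratch area as TCP's per-packet
 * control block, e.g.
 *
 *	TCP_SKB_CB(skb)->seq     = tp->write_seq;
 *	TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + copy;
 *
 * which is roughly how the send path stamps sequence numbers on queued
 * skbs (the real code in tcp.c/tcp_output.c also accounts for SYN/FIN).
 */
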
/* Due to TSO, an SKB can be composed of multiple actual
 * packets. To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
	return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
	TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
	return skb_shinfo(skb)->gso_size;
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
	CA_EVENT_TX_START,	/* first transmit when no packets in flight */
	CA_EVENT_CWND_RESTART,	/* congestion window restart */
	CA_EVENT_COMPLETE_CWR,	/* end of congestion recovery */
	CA_EVENT_LOSS,		/* loss timeout */
	CA_EVENT_ECN_NO_CE,	/* ECT set, but not CE marked */
	CA_EVENT_ECN_IS_CE,	/* received CE marked IP packet */
	CA_EVENT_DELAYED_ACK,	/* Delayed ack is sent */
	CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
	CA_ACK_SLOWPATH		= (1 << 0),	/* In slow path processing */
	CA_ACK_WIN_UPDATE	= (1 << 1),	/* ACK updated window */
	CA_ACK_ECE		= (1 << 2),	/* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX	16
#define TCP_CA_MAX	128
#define TCP_CA_BUF_MAX	(TCP_CA_NAME_MAX*TCP_CA_MAX)

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN	0x2

struct tcp_congestion_ops {
	struct list_head	list;
	unsigned long flags;

	/* initialize private data (optional) */
	void (*init)(struct sock *sk);
	/* cleanup private data  (optional) */
	void (*release)(struct sock *sk);

	/* return slow start threshold (required) */
	u32 (*ssthresh)(struct sock *sk);
	/* do new cwnd calculation (required) */
	void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
	/* call before changing ca_state (optional) */
	void (*set_state)(struct sock *sk, u8 new_state);
	/* call when cwnd event occurs (optional) */
	void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
	/* call when ack arrives (optional) */
	void (*in_ack_event)(struct sock *sk, u32 flags);
	/* new value of cwnd after loss (optional) */
	u32  (*undo_cwnd)(struct sock *sk);
	/* hook for packet ack accounting (optional) */
	void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
	/* get info for inet_diag (optional) */
	void (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);

	char		name[TCP_CA_NAME_MAX];
	struct module	*owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
int tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w);

u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;

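/* Sketch (illustrative, not part of this header): a minimal congestion
 * control module only needs the two required hooks; here they simply
 * reuse the Reno helpers declared above. The names tcp_example and
 * "example" are made up for this sketch.
 *
 *	static struct tcp_congestion_ops tcp_example __read_mostly = {
 *		.ssthresh	= tcp_reno_ssthresh,
 *		.cong_avoid	= tcp_reno_cong_avoid,
 *		.name		= "example",
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	static int __init tcp_example_init(void)
 *	{
 *		return tcp_register_congestion_control(&tcp_example);
 *	}
 *	module_init(tcp_example_init);
 */
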
static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->set_state)
		icsk->icsk_ca_ops->set_state(sk, ca_state);
	icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_ca_ops->cwnd_event)
		icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* RFC3168 : 6.1.1 SYN packets must not have ECT/ECN bits set
 *
 * If we receive a SYN packet with these bits set, it means a
 * network is playing bad games with TOS bits. In order to
 * avoid possible false congestion notifications, we disable
 * TCP ECN negotiation.
 *
 * Exception: tcp_ca wants ECN. This is required for DCTCP
 * congestion control; it requires setting ECT on all packets,
 * including SYN. We invert the test in this case: if our
 * local socket wants ECN but the peer only set ece/cwr (and not
 * ECT in the IP header), it's probably a non-DCTCP-aware sender.
 */
static inline void
TCP_ECN_create_request(struct request_sock *req, const struct sk_buff *skb,
		       const struct sock *listen_sk)
{
	const struct tcphdr *th = tcp_hdr(skb);
	const struct net *net = sock_net(listen_sk);
	bool th_ecn = th->ece && th->cwr;
	bool ect, need_ecn;

	if (!th_ecn)
		return;

	ect = !INET_ECN_is_not_ect(TCP_SKB_CB(skb)->ip_dsfield);
	need_ecn = tcp_ca_needs_ecn(listen_sk);

	if (!ect && !need_ecn && net->ipv4.sysctl_tcp_ecn)
		inet_rsk(req)->ecn_ok = 1;
	else if (ect && need_ecn)
		inet_rsk(req)->ecn_ok = 1;
}


/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
	return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
	return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
	tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = sysctl_tcp_early_retrans &&
		sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
		sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
	tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

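/* Worked example (illustrative, not from the original header): with
 * packets_out == 10, sacked_out == 3, lost_out == 2 and retrans_out == 2,
 * the estimate is
 *
 *	10 - (3 + 2) + 2 == 7
 *
 * packets still in the network: ten sent, five believed to have left it
 * (SACKed or lost), plus two retransmissions currently in flight.
 */
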
#define TCP_INFINITE_SSTHRESH	0x7fffffff

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
	return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
	return (TCPF_CA_CWR | TCPF_CA_Recovery) &
	       (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	if (tcp_in_cwnd_reduction(sk))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh,
			   ((tp->snd_cwnd >> 1) +
			    (tp->snd_cwnd >> 2)));
}

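/* Worked example (illustrative, not from the original header):
 * (cwnd >> 1) + (cwnd >> 2) is 3/4 of cwnd, so with snd_cwnd == 40 and
 * snd_ssthresh == 20 this returns max(20, 30) == 30, i.e. half-way
 * between the old ssthresh and cwnd, matching the comment above.
 */
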
/* Use define here intentionally to get WARN_ON location shown at the caller */
#define tcp_verify_left_out(tp)	WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
	return 3;
}

/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto". This will be the default - same as
 * the default reordering threshold - but if reordering increases,
 * we must be able to allow cwnd to burst at least this much in order
 * to not pull it back when holes are filled.
 */
static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
{
	return tp->reordering;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
	return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example :
 *    cwnd is 10 (IW10), but application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start which already
 * risks 100% overshoot. The advantage is that we discourage applications
 * from sending more filler packets or data just to artificially blow up
 * cwnd usage, and we allow an application-limited process to probe
 * bandwidth more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);

	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
	if (tp->snd_cwnd <= tp->snd_ssthresh)
		return tp->snd_cwnd < 2 * tp->max_packets_out;

	return tp->is_cwnd_limited;
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (!tp->packets_out && !icsk->icsk_pending)
		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  icsk->icsk_rto, TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
	tp->snd_wl1 = seq;
}

Linus Torvalds1da177e2005-04-16 15:20:36 -07001069/*
1070 * Calculate(/check) TCP checksum
1071 */
Frederik Deweerdtba7808e2007-02-04 20:15:27 -08001072static inline __sum16 tcp_v4_check(int len, __be32 saddr,
1073 __be32 daddr, __wsum base)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001074{
1075 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1076}
1077
Al Virob51655b2006-11-14 21:40:42 -08001078static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001079{
Herbert Xufb286bb2005-11-10 13:01:24 -08001080 return __skb_checksum_complete(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001081}
1082
Eric Dumazeta2a385d2012-05-16 23:15:34 +00001083static inline bool tcp_checksum_complete(struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001084{
Herbert Xu60476372007-04-09 11:59:39 -07001085 return !skb_csum_unnecessary(skb) &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001086 __tcp_checksum_complete(skb);
1087}
1088
1089/* Prequeue for VJ style copy to user, combined with checksumming. */
1090
Stephen Hemminger40efc6f2006-01-03 16:03:49 -08001091static inline void tcp_prequeue_init(struct tcp_sock *tp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092{
1093 tp->ucopy.task = NULL;
1094 tp->ucopy.len = 0;
1095 tp->ucopy.memory = 0;
1096 skb_queue_head_init(&tp->ucopy.prequeue);
Chris Leech97fc2f02006-05-23 17:55:33 -07001097#ifdef CONFIG_NET_DMA
1098 tp->ucopy.dma_chan = NULL;
1099 tp->ucopy.wakeup = 0;
1100 tp->ucopy.pinned_list = NULL;
1101 tp->ucopy.dma_cookie = 0;
1102#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103}
1104
Joe Perches5c9f3022013-09-23 11:33:32 -07001105bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
	"Unused", "Established", "Syn Sent", "Syn Recv",
	"Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
	"Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
void tcp_set_state(struct sock *sk, int state);

void tcp_done(struct sock *sk);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
	rx_opt->dsack = 0;
	rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
			       __u32 *window_clamp, int wscale_ok,
			       __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale <= 0 ?
		(space >> (-sysctl_tcp_adv_win_scale)) :
		space - (space >> sysctl_tcp_adv_win_scale);
}
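
/*
 * Worked example (values chosen for illustration): with
 * sysctl_tcp_adv_win_scale = 1 and space = 65536 bytes, the advertised
 * window is 65536 - (65536 >> 1) = 32768, i.e. half the buffer is
 * reserved as overhead for skb metadata. A negative scale selects a
 * fraction directly: with -2, the window is 65536 >> 2 = 16384.
 */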

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf -
				  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
	return tcp_win_from_space(sk->sk_rcvbuf);
}

static inline void tcp_openreq_init(struct request_sock *req,
				    struct tcp_options_received *rx_opt,
				    struct sk_buff *skb, struct sock *sk)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->cookie_ts = 0;
	tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
	tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
	tcp_rsk(req)->snt_synack = tcp_time_stamp;
	req->mss = rx_opt->mss_clamp;
	req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
	ireq->tstamp_ok = rx_opt->tstamp_ok;
	ireq->sack_ok = rx_opt->sack_ok;
	ireq->snd_wscale = rx_opt->snd_wscale;
	ireq->wscale_ok = rx_opt->wscale_ok;
	ireq->acked = 0;
	ireq->ecn_ok = 0;
	ireq->ir_rmt_port = tcp_hdr(skb)->source;
	ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
	ireq->ir_mark = inet_request_mark(sk, skb);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
				  struct sock *sk, struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
	return tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
}
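
/*
 * The GNU C "a ? : b" form evaluates to a when a is non-zero, b
 * otherwise. So a per-socket value configured via setsockopt()
 * (TCP_KEEPINTVL, TCP_KEEPIDLE, TCP_KEEPCNT) takes precedence, and the
 * system-wide sysctl default applies only while the per-socket value
 * is unset (0).
 */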

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
	const struct inet_connection_sock *icsk = &tp->inet_conn;

	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
			  tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
	int fin_timeout = tcp_sk(sk)->linger2 ? : sysctl_tcp_fin_timeout;
	const int rto = inet_csk(sk)->icsk_rto;

	if (fin_timeout < (rto << 2) - (rto >> 1))
		fin_timeout = (rto << 2) - (rto >> 1);

	return fin_timeout;
}
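
/*
 * The clamp above works out to 3.5 * RTO:
 * (rto << 2) - (rto >> 1) = 4 * rto - rto / 2. For example, with an RTO
 * of 200 ms the FIN-WAIT-2 timeout never drops below 700 ms, however
 * small linger2 or sysctl_tcp_fin_timeout is configured.
 */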

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
				  int paws_win)
{
	if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
		return true;
	if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
		return true;
	/*
	 * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
	 * but subsequent TCP messages then carry valid values. Ignore a
	 * 0 value, or else a 'negative' tsval might cause us to reject
	 * their packets.
	 */
	if (!rx_opt->ts_recent)
		return true;
	return false;
}
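
/*
 * The (s32) cast in the first test above makes the comparison safe
 * across timestamp wraparound. Worked example (illustrative values):
 * ts_recent = 0xFFFFFFF0 with a just-wrapped rcv_tsval = 0x00000010
 * gives (s32)(0xFFFFFFF0 - 0x00000010) = -32 <= paws_win, so the new
 * timestamp is correctly treated as more recent and the check passes.
 */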

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
				   int rst)
{
	if (tcp_paws_check(rx_opt, 0))
		return false;

	/* RST segments are not recommended to carry a timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   That is certainly a mistake, but it is necessary to understand
	   the reason for this constraint before relaxing it: if the peer
	   reboots, its clock may go out of sync and half-open connections
	   would never be reset.
	   The problem would not exist if all implementations followed the
	   draft about preserving the timestamp clock across reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax the time bounds for RST segments to MSL.
	 */
	if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
		return false;
	return true;
}

static inline void tcp_mib_init(struct net *net)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOALGORITHM, 1);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(net, TCP_MIB_MAXCONN, -1);
}

/* from STCP */
static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
{
	tp->lost_skb_hint = NULL;
}

static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
{
	tcp_clear_retrans_hints_partial(tp);
	tp->retransmit_skb_hint = NULL;
}

/* MD5 Signature */
struct crypto_hash;

union tcp_md5_addr {
	struct in_addr  a4;
#if IS_ENABLED(CONFIG_IPV6)
	struct in6_addr a6;
#endif
};

/* - key database */
struct tcp_md5sig_key {
	struct hlist_node	node;
	u8			keylen;
	u8			family; /* AF_INET or AF_INET6 */
	union tcp_md5_addr	addr;
	u8			key[TCP_MD5SIG_MAXKEYLEN];
	struct rcu_head		rcu;
};

/* - sock block */
struct tcp_md5sig_info {
	struct hlist_head	head;
	struct rcu_head		rcu;
};

/* - pseudo header */
struct tcp4_pseudohdr {
	__be32		saddr;
	__be32		daddr;
	__u8		pad;
	__u8		protocol;
	__be16		len;
};

struct tcp6_pseudohdr {
	struct in6_addr	saddr;
	struct in6_addr	daddr;
	__be32		len;
	__be32		protocol;	/* including padding */
};

union tcp_md5sum_block {
	struct tcp4_pseudohdr ip4;
#if IS_ENABLED(CONFIG_IPV6)
	struct tcp6_pseudohdr ip6;
#endif
};

/* - pool: digest algorithm, hash description and scratch buffer */
struct tcp_md5sig_pool {
	struct hash_desc	md5_desc;
	union tcp_md5sum_block	md5_blk;
};

/* - functions */
int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
			const struct sock *sk, const struct request_sock *req,
			const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
		   int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
		   int family);
struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
					 struct sock *addr_sk);

#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family);
#define tcp_twsk_md5_key(twsk)	((twsk)->tw_md5_key)
#else
static inline struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
					 const union tcp_md5_addr *addr,
					 int family)
{
	return NULL;
}
#define tcp_twsk_md5_key(twsk)	NULL
#endif

bool tcp_alloc_md5sig_pool(void);

struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
static inline void tcp_put_md5sig_pool(void)
{
	local_bh_enable();
}
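
/*
 * Sketch of the intended usage pattern (cf. the tcp_v4_md5_hash_*()
 * helpers in net/ipv4/tcp_ipv4.c): the pool is per-CPU and
 * tcp_get_md5sig_pool() disables bottom halves, so callers must not
 * sleep before the matching tcp_put_md5sig_pool():
 *
 *	struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *	if (!hp)
 *		goto err;
 *	crypto_hash_init(&hp->md5_desc);
 *	... tcp_md5_hash_header() / tcp_md5_hash_skb_data() /
 *	    tcp_md5_hash_key() ...
 *	crypto_hash_final(&hp->md5_desc, md5_hash);
 *	tcp_put_md5sig_pool();
 */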

int tcp_md5_hash_header(struct tcp_md5sig_pool *, const struct tcphdr *);
int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
			  unsigned int header_len);
int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
		     const struct tcp_md5sig_key *key);

/* From tcp_fastopen.c */
void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
			    struct tcp_fastopen_cookie *cookie, int *syn_loss,
			    unsigned long *last_syn_loss);
void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
			    struct tcp_fastopen_cookie *cookie, bool syn_lost);
struct tcp_fastopen_request {
	/* Fast Open cookie. Size 0 means a cookie request */
	struct tcp_fastopen_cookie	cookie;
	struct msghdr			*data;  /* data in MSG_FASTOPEN */
	size_t				size;
	int				copied;	/* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);

extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
int tcp_fastopen_reset_cipher(void *key, unsigned int len);
bool tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
		      struct request_sock *req,
		      struct tcp_fastopen_cookie *foc,
		      struct dst_entry *dst);
void tcp_fastopen_init_key_once(bool publish);
#define TCP_FASTOPEN_KEY_LENGTH 16

/* Fastopen key context */
struct tcp_fastopen_context {
	struct crypto_cipher	*tfm;
	__u8			key[TCP_FASTOPEN_KEY_LENGTH];
	struct rcu_head		rcu;
};
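
/*
 * Note (sketch of the key lifecycle): the server-side Fast Open cookie
 * is derived from the 16-byte key above. The key is generated lazily by
 * tcp_fastopen_init_key_once() on first use, and can be replaced at
 * runtime through tcp_fastopen_reset_cipher(), e.g. when an
 * administrator writes /proc/sys/net/ipv4/tcp_fastopen_key.
 */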

/* write queue abstraction */
static inline void tcp_write_queue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
		sk_wmem_free_skb(sk, skb);
	sk_mem_reclaim(sk);
	tcp_clear_all_retrans_hints(tcp_sk(sk));
}

static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
{
	return skb_peek(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
{
	return skb_peek_tail(&sk->sk_write_queue);
}

static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_next(&sk->sk_write_queue, skb);
}

static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
						   const struct sk_buff *skb)
{
	return skb_queue_prev(&sk->sk_write_queue, skb);
}

#define tcp_for_write_queue(skb, sk)					\
	skb_queue_walk(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from(skb, sk)				\
	skb_queue_walk_from(&(sk)->sk_write_queue, skb)

#define tcp_for_write_queue_from_safe(skb, tmp, sk)			\
	skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)

static inline struct sk_buff *tcp_send_head(const struct sock *sk)
{
	return sk->sk_send_head;
}

static inline bool tcp_skb_is_last(const struct sock *sk,
				   const struct sk_buff *skb)
{
	return skb_queue_is_last(&sk->sk_write_queue, skb);
}

static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
{
	if (tcp_skb_is_last(sk, skb))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = tcp_write_queue_next(sk, skb);
}
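
/*
 * Sketch of how the transmit path uses these helpers (cf.
 * tcp_write_xmit() and tcp_event_new_data_sent() in
 * net/ipv4/tcp_output.c; the real checks are elided for brevity):
 *
 *	while ((skb = tcp_send_head(sk)) != NULL) {
 *		if (cwnd or receive-window checks fail)
 *			break;
 *		if (tcp_transmit_skb(sk, skb, 1, gfp))
 *			break;
 *		tcp_event_new_data_sent(sk, skb);  (advances sk_send_head)
 *	}
 */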

static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
{
	if (sk->sk_send_head == skb_unlinked)
		sk->sk_send_head = NULL;
}

static inline void tcp_init_send_head(struct sock *sk)
{
	sk->sk_send_head = NULL;
}

static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_tail(&sk->sk_write_queue, skb);
}

static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
{
	__tcp_add_write_queue_tail(sk, skb);

	/* Queue it, remembering where we must start sending. */
	if (sk->sk_send_head == NULL) {
		sk->sk_send_head = skb;

		if (tcp_sk(sk)->highest_sack == NULL)
			tcp_sk(sk)->highest_sack = skb;
	}
}

static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
{
	__skb_queue_head(&sk->sk_write_queue, skb);
}

/* Insert buff after skb on the write queue of sk. */
static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
						struct sk_buff *buff,
						struct sock *sk)
{
	__skb_queue_after(&sk->sk_write_queue, skb, buff);
}

/* Insert new before skb on the write queue of sk. */
static inline void tcp_insert_write_queue_before(struct sk_buff *new,
						 struct sk_buff *skb,
						 struct sock *sk)
{
	__skb_queue_before(&sk->sk_write_queue, skb, new);

	if (sk->sk_send_head == skb)
		sk->sk_send_head = new;
}

static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
{
	__skb_unlink(skb, &sk->sk_write_queue);
}

static inline bool tcp_write_queue_empty(struct sock *sk)
{
	return skb_queue_empty(&sk->sk_write_queue);
}

static inline void tcp_push_pending_frames(struct sock *sk)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		__tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
	}
}

/* Start sequence of the skb just after the highest skb with SACKed
 * bit; valid only if sacked_out > 0 or when the caller has itself
 * ensured validity.
 */
static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
{
	if (!tp->sacked_out)
		return tp->snd_una;

	if (tp->highest_sack == NULL)
		return tp->snd_nxt;

	return TCP_SKB_CB(tp->highest_sack)->seq;
}

static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
{
	tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
						tcp_write_queue_next(sk, skb);
}

static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
{
	return tcp_sk(sk)->highest_sack;
}

static inline void tcp_highest_sack_reset(struct sock *sk)
{
	tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
}

/* Called when old skb is about to be deleted (to be combined with new skb) */
static inline void tcp_highest_sack_combine(struct sock *sk,
					    struct sk_buff *old,
					    struct sk_buff *new)
{
	if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
		tcp_sk(sk)->highest_sack = new;
}

/* Determines whether this is a thin stream (which may suffer from
 * increased latency). Used to trigger latency-reducing mechanisms.
 */
static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
{
	return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
}
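
/*
 * Thin streams are typical of interactive applications (e.g. ssh,
 * instant messaging, online games) that keep fewer than four packets in
 * flight and therefore rarely trigger fast retransmit. Userspace can
 * opt in to the latency-reducing mechanisms with the
 * TCP_THIN_LINEAR_TIMEOUTS and TCP_THIN_DUPACK socket options.
 */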

/* /proc */
enum tcp_seq_states {
	TCP_SEQ_STATE_LISTENING,
	TCP_SEQ_STATE_OPENREQ,
	TCP_SEQ_STATE_ESTABLISHED,
};

int tcp_seq_open(struct inode *inode, struct file *file);

struct tcp_seq_afinfo {
	char				*name;
	sa_family_t			family;
	const struct file_operations	*seq_fops;
	struct seq_operations		seq_ops;
};

struct tcp_iter_state {
	struct seq_net_private	p;
	sa_family_t		family;
	enum tcp_seq_states	state;
	struct sock		*syn_wait_sk;
	int			bucket, offset, sbucket, num;
	kuid_t			uid;
	loff_t			last_pos;
};

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);

extern struct request_sock_ops tcp_request_sock_ops;
extern struct request_sock_ops tcp6_request_sock_ops;

void tcp_v4_destroy_sock(struct sock *sk);

struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
				netdev_features_t features);
struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);

void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);

static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
{
	return tp->notsent_lowat ?: sysctl_tcp_notsent_lowat;
}

static inline bool tcp_stream_memory_free(const struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	u32 notsent_bytes = tp->write_seq - tp->snd_nxt;

	return notsent_bytes < tcp_notsent_lowat(tp);
}
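
/*
 * Example (illustrative values): with net.ipv4.tcp_notsent_lowat or the
 * TCP_NOTSENT_LOWAT socket option set to 131072, poll()/epoll report
 * the socket writable only while less than 128 KB of not-yet-sent data
 * (write_seq - snd_nxt) is queued, bounding write-queue memory and the
 * latency of data the application has already committed.
 */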

#ifdef CONFIG_PROC_FS
int tcp4_proc_init(void);
void tcp4_proc_exit(void);
#endif

int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb);

/* TCP af-specific functions */
struct tcp_sock_af_ops {
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key	*(*md5_lookup) (struct sock *sk,
						struct sock *addr_sk);
	int		(*calc_md5_hash) (char *location,
					  struct tcp_md5sig_key *md5,
					  const struct sock *sk,
					  const struct request_sock *req,
					  const struct sk_buff *skb);
	int		(*md5_parse) (struct sock *sk,
				      char __user *optval,
				      int optlen);
#endif
};

struct tcp_request_sock_ops {
	u16 mss_clamp;
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_md5sig_key *(*md5_lookup) (struct sock *sk,
					      struct request_sock *req);
	int (*calc_md5_hash) (char *location,
			      struct tcp_md5sig_key *md5,
			      const struct sock *sk,
			      const struct request_sock *req,
			      const struct sk_buff *skb);
#endif
	void (*init_req)(struct request_sock *req, struct sock *sk,
			 struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES
	__u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
				 __u16 *mss);
#endif
	struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
				       const struct request_sock *req,
				       bool *strict);
	__u32 (*init_seq)(const struct sk_buff *skb);
	int (*send_synack)(struct sock *sk, struct dst_entry *dst,
			   struct flowi *fl, struct request_sock *req,
			   u16 queue_mapping, struct tcp_fastopen_cookie *foc);
	void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
			       const unsigned long timeout);
};

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return ops->cookie_init_seq(sk, skb, mss);
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 struct sock *sk, struct sk_buff *skb,
					 __u16 *mss)
{
	return 0;
}
#endif

int tcpv4_offload_init(void);

void tcp_v4_init(void);
void tcp_init(void);

#endif	/* _TCP_H */