/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/module.h>
#include <linux/gfp.h>
#include <net/tcp.h>

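/* Return the timestamp (in TCP clock units) marking the start of the current
 * retransmission episode: tp->retrans_stamp if it is set, otherwise the send
 * timestamp of the head of the retransmit queue, or 0 if that queue is empty.
 */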
static u32 tcp_retransmit_stamp(const struct sock *sk)
{
        u32 start_ts = tcp_sk(sk)->retrans_stamp;

        if (unlikely(!start_ts)) {
                struct sk_buff *head = tcp_rtx_queue_head(sk);

                if (!head)
                        return 0;
                start_ts = tcp_skb_timestamp(head);
        }
        return start_ts;
}

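/* When TCP_USER_TIMEOUT is set, clamp the next RTO so that the retransmit
 * timer fires no later than the remaining user-timeout budget; returns the
 * (possibly clamped) timeout in jiffies.
 */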
static u32 tcp_clamp_rto_to_user_timeout(const struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 elapsed, start_ts;

        start_ts = tcp_retransmit_stamp(sk);
        if (!icsk->icsk_user_timeout || !start_ts)
                return icsk->icsk_rto;
        elapsed = tcp_time_stamp(tcp_sk(sk)) - start_ts;
        if (elapsed >= icsk->icsk_user_timeout)
                return 1; /* user timeout has passed; fire ASAP */
        else
                return min_t(u32, icsk->icsk_rto, msecs_to_jiffies(icsk->icsk_user_timeout - elapsed));
}

/**
 *  tcp_write_err() - close socket and save error info
 *  @sk:  The socket the error has appeared on.
 *
 *  Returns: Nothing (void)
 */

static void tcp_write_err(struct sock *sk)
{
        sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
        sk->sk_error_report(sk);

        tcp_write_queue_purge(sk);
        tcp_done(sk);
        __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONTIMEOUT);
}

/**
 *  tcp_out_of_resources() - Close socket if out of resources
 *  @sk:        pointer to current socket
 *  @do_reset:  send a last packet with reset flag
 *
 *  Do not allow orphaned sockets to eat all our resources.
 *  This is a direct violation of the TCP specs, but it is required
 *  to prevent DoS attacks. It is called when a retransmission timeout
 *  or zero probe timeout occurs on an orphaned socket.
 *
 *  Also close if our net namespace is exiting; in that case there is no
 *  hope of ever communicating again since all netns interfaces are already
 *  down (or about to be down), and we need to release our dst references,
 *  which have been moved to the netns loopback interface, so the namespace
 *  can finish exiting.  This condition is only possible if we are a kernel
 *  socket, as those do not hold references to the namespace.
 *
 *  The criteria are still not confirmed experimentally and may change.
 *  We kill the socket if:
 *  1. The number of orphaned sockets exceeds an administratively configured
 *     limit.
 *  2. We have strong memory pressure.
 *  3. Our net namespace is exiting.
 */
static int tcp_out_of_resources(struct sock *sk, bool do_reset)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int shift = 0;

        /* If peer does not open window for long time, or did not transmit
         * anything for long time, penalize it. */
        if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
                shift++;

        /* If some dubious ICMP arrived, penalize even more. */
        if (sk->sk_err_soft)
                shift++;

        if (tcp_check_oom(sk, shift)) {
                /* Catch exceptional cases, when connection requires reset.
                 *      1. Last segment was sent recently. */
                if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
                    /*  2. Window is closed. */
                    (!tp->snd_wnd && !tp->packets_out))
                        do_reset = true;
                if (do_reset)
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                tcp_done(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTONMEMORY);
                return 1;
        }

        if (!check_net(sock_net(sk))) {
                /* Not possible to send reset; just close */
                tcp_done(sk);
                return 1;
        }

        return 0;
}

/**
 *  tcp_orphan_retries() - Returns maximal number of retries on an orphaned socket
 *  @sk:    Pointer to the current socket.
 *  @alive: bool, socket alive state
 */
static int tcp_orphan_retries(struct sock *sk, bool alive)
{
        int retries = sock_net(sk)->ipv4.sysctl_tcp_orphan_retries; /* May be zero. */

        /* We know from an ICMP that something is wrong. */
        if (sk->sk_err_soft && !alive)
                retries = 0;

        /* However, if socket sent something recently, select some safe
         * number of retries. 8 corresponds to >100 seconds with minimal
         * RTO of 200msec. */
        if (retries == 0 && alive)
                retries = 8;
        return retries;
}

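/* Repeated retransmission timeouts may indicate a path-MTU black hole:
 * enable MTU probing on the first such timeout, and on later timeouts
 * shrink the probe search range by roughly halving the current MSS
 * (never below 68 bytes minus the TCP header length).
 */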
static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
{
        const struct net *net = sock_net(sk);
        int mss;

        /* Black hole detection */
        if (!net->ipv4.sysctl_tcp_mtu_probing)
                return;

        if (!icsk->icsk_mtup.enabled) {
                icsk->icsk_mtup.enabled = 1;
                icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
        } else {
                mss = tcp_mtu_to_mss(sk, icsk->icsk_mtup.search_low) >> 1;
                mss = min(net->ipv4.sysctl_tcp_base_mss, mss);
                mss = max(mss, 68 - tcp_sk(sk)->tcp_header_len);
                icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
        }
        tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
}


/**
 *  retransmits_timed_out() - returns true if this connection has timed out
 *  @sk:       The current socket
 *  @boundary: max number of retransmissions
 *  @timeout:  A custom timeout value.
 *             If set to 0, the default timeout is calculated from
 *             TCP_RTO_MIN and the number of unsuccessful retransmits.
 *
 *  The default "timeout" value this function can calculate and use
 *  is equivalent to the timeout of a TCP connection
 *  after "boundary" unsuccessful, exponentially backed-off
 *  retransmissions with an initial RTO of TCP_RTO_MIN.
 */
static bool retransmits_timed_out(struct sock *sk,
                                  unsigned int boundary,
                                  unsigned int timeout)
{
        const unsigned int rto_base = TCP_RTO_MIN;
        unsigned int linear_backoff_thresh, start_ts;

        if (!inet_csk(sk)->icsk_retransmits)
                return false;

        start_ts = tcp_retransmit_stamp(sk);
        if (!start_ts)
                return false;

        if (likely(timeout == 0)) {
                linear_backoff_thresh = ilog2(TCP_RTO_MAX/rto_base);

                if (boundary <= linear_backoff_thresh)
                        timeout = ((2 << boundary) - 1) * rto_base;
                else
                        timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
                                (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
                timeout = jiffies_to_msecs(timeout);
        }
        return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= timeout;
}

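/* Worked example for the default timeout above: with TCP_RTO_MIN (200ms)
 * and boundary = 3, timeout = ((2 << 3) - 1) * 200ms = 15 * 200ms = 3000ms,
 * i.e. the sum of the exponentially backed-off RTOs 200 + 400 + 800 + 1600 ms.
 */
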
/* A write timeout has occurred. Process the after effects. */
static int tcp_write_timeout(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        bool expired, do_reset;
        int retry_until;

        if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
                if (icsk->icsk_retransmits) {
                        dst_negative_advice(sk);
                } else if (!tp->syn_data && !tp->syn_fastopen) {
                        sk_rethink_txhash(sk);
                }
                retry_until = icsk->icsk_syn_retries ? : net->ipv4.sysctl_tcp_syn_retries;
                expired = icsk->icsk_retransmits >= retry_until;
        } else {
                if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1, 0)) {
                        /* Black hole detection */
                        tcp_mtu_probing(icsk, sk);

                        dst_negative_advice(sk);
                } else {
                        sk_rethink_txhash(sk);
                }

                retry_until = net->ipv4.sysctl_tcp_retries2;
                if (sock_flag(sk, SOCK_DEAD)) {
                        const bool alive = icsk->icsk_rto < TCP_RTO_MAX;

                        retry_until = tcp_orphan_retries(sk, alive);
                        do_reset = alive ||
                                !retransmits_timed_out(sk, retry_until, 0);

                        if (tcp_out_of_resources(sk, do_reset))
                                return 1;
                }
                expired = retransmits_timed_out(sk, retry_until,
                                                icsk->icsk_user_timeout);
        }
        tcp_fastopen_active_detect_blackhole(sk, expired);

        if (BPF_SOCK_OPS_TEST_FLAG(tp, BPF_SOCK_OPS_RTO_CB_FLAG))
                tcp_call_bpf_3arg(sk, BPF_SOCK_OPS_RTO_CB,
                                  icsk->icsk_retransmits,
                                  icsk->icsk_rto, (int)expired);

        if (expired) {
                /* Has it gone just too far? */
                tcp_write_err(sk);
                return 1;
        }

        return 0;
}

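/* Send the pending delayed ACK (adjusting the ATO estimate) once the
 * delayed-ACK timer has really expired; otherwise re-arm the timer for
 * the remaining time.
 */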
/* Called with BH disabled */
void tcp_delack_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk_mem_reclaim_partial(sk);

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
                goto out;

        if (time_after(icsk->icsk_ack.timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_delack_timer, icsk->icsk_ack.timeout);
                goto out;
        }
        icsk->icsk_ack.pending &= ~ICSK_ACK_TIMER;

        if (inet_csk_ack_scheduled(sk)) {
                if (!icsk->icsk_ack.pingpong) {
                        /* Delayed ACK missed: inflate ATO. */
                        icsk->icsk_ack.ato = min(icsk->icsk_ack.ato << 1, icsk->icsk_rto);
                } else {
                        /* Delayed ACK missed: leave pingpong mode and
                         * deflate ATO.
                         */
                        icsk->icsk_ack.pingpong = 0;
                        icsk->icsk_ack.ato = TCP_ATO_MIN;
                }
                tcp_mstamp_refresh(tcp_sk(sk));
                tcp_send_ack(sk);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKS);
        }

out:
        if (tcp_under_memory_pressure(sk))
                sk_mem_reclaim(sk);
}


/**
 *  tcp_delack_timer() - The TCP delayed ACK timeout handler
 *  @t:  Pointer to the timer embedded in the socket's inet_connection_sock
 *       (used to recover the struct sock *).
 *
 *  This function gets (indirectly) called when the kernel timer for a TCP packet
 *  of this socket expires. Calls tcp_delack_timer_handler() to do the actual work.
 *
 *  Returns: Nothing (void)
 */
static void tcp_delack_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_delack_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_delack_timer_handler(sk);
        } else {
                icsk->icsk_ack.blocked = 1;
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

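/* Zero-window probe (persist) timer: called while data is queued behind a
 * closed receive window. Aborts the connection once the probe count or the
 * TCP_USER_TIMEOUT limit is exceeded, otherwise sends another window probe.
 */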
static void tcp_probe_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct sk_buff *skb = tcp_send_head(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        int max_probes;
        u32 start_ts;

        if (tp->packets_out || !skb) {
                icsk->icsk_probes_out = 0;
                return;
        }

        /* RFC 1122 4.2.2.17 requires the sender to stay open indefinitely as
         * long as the receiver continues to respond probes. We support this by
         * default and reset icsk_probes_out with incoming ACKs. But if the
         * socket is orphaned or the user specifies TCP_USER_TIMEOUT, we
         * kill the socket when the retry count and the time exceeds the
         * corresponding system limit. We also implement similar policy when
         * we use RTO to probe window in tcp_retransmit_timer().
         */
        start_ts = tcp_skb_timestamp(skb);
        if (!start_ts)
                skb->skb_mstamp_ns = tp->tcp_wstamp_ns;
        else if (icsk->icsk_user_timeout &&
                 (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
                goto abort;

        max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
        if (sock_flag(sk, SOCK_DEAD)) {
                const bool alive = inet_csk_rto_backoff(icsk, TCP_RTO_MAX) < TCP_RTO_MAX;

                max_probes = tcp_orphan_retries(sk, alive);
                if (!alive && icsk->icsk_backoff >= max_probes)
                        goto abort;
                if (tcp_out_of_resources(sk, true))
                        return;
        }

        if (icsk->icsk_probes_out > max_probes) {
abort:          tcp_write_err(sk);
        } else {
                /* Only send another probe if we didn't close things up. */
                tcp_send_probe0(sk);
        }
}

/*
 *	Timer for Fast Open socket to retransmit SYNACK. Note that the
 *	sk here is the child socket, not the parent (listener) socket.
 */
static void tcp_fastopen_synack_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int max_retries = icsk->icsk_syn_retries ? :
            sock_net(sk)->ipv4.sysctl_tcp_synack_retries + 1; /* add one more retry for fastopen */
        struct request_sock *req;

        req = tcp_sk(sk)->fastopen_rsk;
        req->rsk_ops->syn_ack_timeout(req);

        if (req->num_timeout >= max_retries) {
                tcp_write_err(sk);
                return;
        }
        /* XXX (TFO) - Unlike regular SYN-ACK retransmit, we ignore error
         * returned from rtx_syn_ack() to make it more persistent like
         * regular retransmit because if the child socket has been accepted
         * it's not good to give up too easily.
         */
        inet_rtx_syn_ack(sk, req);
        req->num_timeout++;
        icsk->icsk_retransmits++;
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                          TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX);
}


/**
 *  tcp_retransmit_timer() - The TCP retransmit timeout handler
 *  @sk:  Pointer to the current socket.
 *
 *  This function gets called when the kernel timer for a TCP packet
 *  of this socket expires.
 *
 *  It handles retransmission, timer adjustment and other necessary measures.
 *
 *  Returns: Nothing (void)
 */
void tcp_retransmit_timer(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct net *net = sock_net(sk);
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (tp->fastopen_rsk) {
                WARN_ON_ONCE(sk->sk_state != TCP_SYN_RECV &&
                             sk->sk_state != TCP_FIN_WAIT1);
                tcp_fastopen_synack_timer(sk);
                /* Before we receive ACK to our SYN-ACK don't retransmit
                 * anything else (e.g., data or FIN segments).
                 */
                return;
        }
        if (!tp->packets_out)
                goto out;

        WARN_ON(tcp_rtx_queue_empty(sk));

        tp->tlp_high_seq = 0;

        if (!tp->snd_wnd && !sock_flag(sk, SOCK_DEAD) &&
            !((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))) {
                /* Receiver dastardly shrinks window. Our retransmits
                 * become zero probes, but we should not timeout this
                 * connection. If the socket is an orphan, time it out,
                 * we cannot allow such beasts to hang infinitely.
                 */
                struct inet_sock *inet = inet_sk(sk);
                if (sk->sk_family == AF_INET) {
                        net_dbg_ratelimited("Peer %pI4:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &inet->inet_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (sk->sk_family == AF_INET6) {
                        net_dbg_ratelimited("Peer %pI6:%u/%u unexpectedly shrunk window %u:%u (repaired)\n",
                                            &sk->sk_v6_daddr,
                                            ntohs(inet->inet_dport),
                                            inet->inet_num,
                                            tp->snd_una, tp->snd_nxt);
                }
#endif
                if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
                        tcp_write_err(sk);
                        goto out;
                }
                tcp_enter_loss(sk);
                tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1);
                __sk_dst_reset(sk);
                goto out_reset_timer;
        }

        if (tcp_write_timeout(sk))
                goto out;

        if (icsk->icsk_retransmits == 0) {
                int mib_idx;

                if (icsk->icsk_ca_state == TCP_CA_Recovery) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKRECOVERYFAIL;
                        else
                                mib_idx = LINUX_MIB_TCPRENORECOVERYFAIL;
                } else if (icsk->icsk_ca_state == TCP_CA_Loss) {
                        mib_idx = LINUX_MIB_TCPLOSSFAILURES;
                } else if ((icsk->icsk_ca_state == TCP_CA_Disorder) ||
                           tp->sacked_out) {
                        if (tcp_is_sack(tp))
                                mib_idx = LINUX_MIB_TCPSACKFAILURES;
                        else
                                mib_idx = LINUX_MIB_TCPRENOFAILURES;
                } else {
                        mib_idx = LINUX_MIB_TCPTIMEOUTS;
                }
                __NET_INC_STATS(sock_net(sk), mib_idx);
        }

        tcp_enter_loss(sk);

        if (tcp_retransmit_skb(sk, tcp_rtx_queue_head(sk), 1) > 0) {
                /* Retransmission failed because of local congestion,
                 * do not backoff.
                 */
                if (!icsk->icsk_retransmits)
                        icsk->icsk_retransmits = 1;
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                          min(icsk->icsk_rto, TCP_RESOURCE_PROBE_INTERVAL),
                                          TCP_RTO_MAX);
                goto out;
        }

        /* Increase the timeout each time we retransmit.  Note that
         * we do not increase the rtt estimate.  rto is initialized
         * from rtt, but increases here.  Jacobson (SIGCOMM 88) suggests
         * that doubling rto each time is the least we can get away with.
         * In KA9Q, Karn uses this for the first few times, and then
         * goes to quadratic.  netBSD doubles, but only goes up to *64,
         * and clamps at 1 to 64 sec afterwards.  Note that 120 sec is
         * defined in the protocol as the maximum possible RTT.  I guess
         * we'll have to use something other than TCP to talk to the
         * University of Mars.
         *
         * PAWS allows us longer timeouts and large windows, so once
         * implemented ftp to mars will work nicely. We will have to fix
         * the 120 second clamps though!
         */
        icsk->icsk_backoff++;
        icsk->icsk_retransmits++;

out_reset_timer:
        /* If stream is thin, use linear timeouts. Since 'icsk_backoff' is
         * used to reset timer, set to 0. Recalculate 'icsk_rto' as this
         * might be increased if the stream oscillates between thin and thick,
         * thus the old value might already be too high compared to the value
         * set by 'tcp_set_rto' in tcp_input.c which resets the rto without
         * backoff. Limit to TCP_THIN_LINEAR_RETRIES before initiating
         * exponential backoff behaviour to avoid continue hammering
         * linear-timeout retransmissions into a black hole
         */
        if (sk->sk_state == TCP_ESTABLISHED &&
            (tp->thin_lto || net->ipv4.sysctl_tcp_thin_linear_timeouts) &&
            tcp_stream_is_thin(tp) &&
            icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) {
                icsk->icsk_backoff = 0;
                icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX);
        } else {
                /* Use normal (exponential) backoff */
                icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX);
        }
        inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
                                  tcp_clamp_rto_to_user_timeout(sk), TCP_RTO_MAX);
        if (retransmits_timed_out(sk, net->ipv4.sysctl_tcp_retries1 + 1, 0))
                __sk_dst_reset(sk);

out:;
}

/* Called with bottom-half processing disabled.
   Called by tcp_write_timer() */
void tcp_write_timer_handler(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        int event;

        if (((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)) ||
            !icsk->icsk_pending)
                goto out;

        if (time_after(icsk->icsk_timeout, jiffies)) {
                sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
                goto out;
        }

        tcp_mstamp_refresh(tcp_sk(sk));
        event = icsk->icsk_pending;

        switch (event) {
        case ICSK_TIME_REO_TIMEOUT:
                tcp_rack_reo_timeout(sk);
                break;
        case ICSK_TIME_LOSS_PROBE:
                tcp_send_loss_probe(sk);
                break;
        case ICSK_TIME_RETRANS:
                icsk->icsk_pending = 0;
                tcp_retransmit_timer(sk);
                break;
        case ICSK_TIME_PROBE0:
                icsk->icsk_pending = 0;
                tcp_probe_timer(sk);
                break;
        }

out:
        sk_mem_reclaim(sk);
}

static void tcp_write_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk =
                        from_timer(icsk, t, icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                tcp_write_timer_handler(sk);
        } else {
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED, &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

void tcp_syn_ack_timeout(const struct request_sock *req)
{
        struct net *net = read_pnet(&inet_rsk(req)->ireq_net);

        __NET_INC_STATS(net, LINUX_MIB_TCPTIMEOUTS);
}
EXPORT_SYMBOL(tcp_syn_ack_timeout);

void tcp_set_keepalive(struct sock *sk, int val)
{
        if ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))
                return;

        if (val && !sock_flag(sk, SOCK_KEEPOPEN))
                inet_csk_reset_keepalive_timer(sk, keepalive_time_when(tcp_sk(sk)));
        else if (!val)
                inet_csk_delete_keepalive_timer(sk);
}
EXPORT_SYMBOL_GPL(tcp_set_keepalive);


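/* Keepalive timer: sends keepalive probes once the connection has been idle
 * for keepalive_time_when(tp), and aborts it when the probe count (or
 * TCP_USER_TIMEOUT) is exceeded. Also handles orphaned FIN_WAIT2 sockets,
 * moving them to timewait handling or resetting them according to tp->linger2.
 */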
static void tcp_keepalive_timer (struct timer_list *t)
{
        struct sock *sk = from_timer(sk, t, sk_timer);
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);
        u32 elapsed;

        /* Only process if socket is not in use. */
        bh_lock_sock(sk);
        if (sock_owned_by_user(sk)) {
                /* Try again later. */
                inet_csk_reset_keepalive_timer (sk, HZ/20);
                goto out;
        }

        if (sk->sk_state == TCP_LISTEN) {
                pr_err("Hmm... keepalive on a LISTEN ???\n");
                goto out;
        }

        tcp_mstamp_refresh(tp);
        if (sk->sk_state == TCP_FIN_WAIT2 && sock_flag(sk, SOCK_DEAD)) {
                if (tp->linger2 >= 0) {
                        const int tmo = tcp_fin_time(sk) - TCP_TIMEWAIT_LEN;

                        if (tmo > 0) {
                                tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
                                goto out;
                        }
                }
                tcp_send_active_reset(sk, GFP_ATOMIC);
                goto death;
        }

        if (!sock_flag(sk, SOCK_KEEPOPEN) ||
            ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_SYN_SENT)))
                goto out;

        elapsed = keepalive_time_when(tp);

        /* It is alive without keepalive 8) */
        if (tp->packets_out || !tcp_write_queue_empty(sk))
                goto resched;

        elapsed = keepalive_time_elapsed(tp);

        if (elapsed >= keepalive_time_when(tp)) {
                /* If the TCP_USER_TIMEOUT option is enabled, use that
                 * to determine when to timeout instead.
                 */
                if ((icsk->icsk_user_timeout != 0 &&
                    elapsed >= msecs_to_jiffies(icsk->icsk_user_timeout) &&
                    icsk->icsk_probes_out > 0) ||
                    (icsk->icsk_user_timeout == 0 &&
                    icsk->icsk_probes_out >= keepalive_probes(tp))) {
                        tcp_send_active_reset(sk, GFP_ATOMIC);
                        tcp_write_err(sk);
                        goto out;
                }
                if (tcp_write_wakeup(sk, LINUX_MIB_TCPKEEPALIVE) <= 0) {
                        icsk->icsk_probes_out++;
                        elapsed = keepalive_intvl_when(tp);
                } else {
                        /* If keepalive was lost due to local congestion,
                         * try harder.
                         */
                        elapsed = TCP_RESOURCE_PROBE_INTERVAL;
                }
        } else {
                /* It is tp->rcv_tstamp + keepalive_time_when(tp) */
                elapsed = keepalive_time_when(tp) - elapsed;
        }

        sk_mem_reclaim(sk);

resched:
        inet_csk_reset_keepalive_timer (sk, elapsed);
        goto out;

death:
        tcp_done(sk);

out:
        bh_unlock_sock(sk);
        sock_put(sk);
}

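/* hrtimer callback for the compressed-ACK timer: send the ACK that was
 * withheld while ACKs were being compressed, or defer the work to
 * tcp_release_cb() if the socket is currently owned by user context.
 */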
static enum hrtimer_restart tcp_compressed_ack_kick(struct hrtimer *timer)
{
        struct tcp_sock *tp = container_of(timer, struct tcp_sock, compressed_ack_timer);
        struct sock *sk = (struct sock *)tp;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                if (tp->compressed_ack)
                        tcp_send_ack(sk);
        } else {
                if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);

        sock_put(sk);

        return HRTIMER_NORESTART;
}

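/* Install the timer_list based retransmit, delayed-ACK and keepalive timers,
 * plus the hrtimer based pacing and compressed-ACK timers.
 */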
void tcp_init_xmit_timers(struct sock *sk)
{
        inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
                                  &tcp_keepalive_timer);
        hrtimer_init(&tcp_sk(sk)->pacing_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_ABS_PINNED_SOFT);
        tcp_sk(sk)->pacing_timer.function = tcp_pace_kick;

        hrtimer_init(&tcp_sk(sk)->compressed_ack_timer, CLOCK_MONOTONIC,
                     HRTIMER_MODE_REL_PINNED_SOFT);
        tcp_sk(sk)->compressed_ack_timer.function = tcp_compressed_ack_kick;
}