// SPDX-License-Identifier: GPL-2.0-or-later
/* Management of Tx window, Tx resend, ACKs and out-of-sequence reception
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/udp.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Propose a PING ACK be sent.
 */
static void rxrpc_propose_ping(struct rxrpc_call *call,
			       bool immediate, bool background)
{
	if (immediate) {
		if (background &&
		    !test_and_set_bit(RXRPC_CALL_EV_PING, &call->events))
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies;
		unsigned long ping_at = now + rxrpc_idle_ack_delay;

		if (time_before(ping_at, call->ping_at)) {
			WRITE_ONCE(call->ping_at, ping_at);
			rxrpc_reduce_call_timer(call, ping_at, now,
						rxrpc_timer_set_for_ping);
		}
	}
}

/*
 * propose an ACK be sent
 */
static void __rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
				u32 serial, bool immediate, bool background,
				enum rxrpc_propose_ack_trace why)
{
	enum rxrpc_propose_ack_outcome outcome = rxrpc_propose_ack_use;
	unsigned long expiry = rxrpc_soft_ack_delay;
	s8 prior = rxrpc_ack_priority[ack_reason];

	/* Pings are handled specially because we don't want to accidentally
	 * lose a ping response by subsuming it into a ping.
	 */
	if (ack_reason == RXRPC_ACK_PING) {
		rxrpc_propose_ping(call, immediate, background);
		goto trace;
	}

	/* Update DELAY, IDLE, REQUESTED and PING_RESPONSE ACK serial
	 * numbers, but we don't alter the timeout.
	 */
	_debug("prior %u %u vs %u %u",
	       ack_reason, prior,
	       call->ackr_reason, rxrpc_ack_priority[call->ackr_reason]);
	if (ack_reason == call->ackr_reason) {
		if (RXRPC_ACK_UPDATEABLE & (1 << ack_reason)) {
			outcome = rxrpc_propose_ack_update;
			call->ackr_serial = serial;
		}
		if (!immediate)
			goto trace;
	} else if (prior > rxrpc_ack_priority[call->ackr_reason]) {
		call->ackr_reason = ack_reason;
		call->ackr_serial = serial;
	} else {
		outcome = rxrpc_propose_ack_subsume;
	}

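	/* Select how long this ACK may be deferred.  Each reason has its own
	 * delay tunable and can only shorten the expiry chosen above; any
	 * reason without a tunable is transmitted immediately.
	 */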
	switch (ack_reason) {
	case RXRPC_ACK_REQUESTED:
		if (rxrpc_requested_ack_delay < expiry)
			expiry = rxrpc_requested_ack_delay;
		if (serial == 1)
			immediate = false;
		break;

	case RXRPC_ACK_DELAY:
		if (rxrpc_soft_ack_delay < expiry)
			expiry = rxrpc_soft_ack_delay;
		break;

	case RXRPC_ACK_IDLE:
		if (rxrpc_idle_ack_delay < expiry)
			expiry = rxrpc_idle_ack_delay;
		break;

	default:
		immediate = true;
		break;
	}

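	/* Schedule the ACK.  If one is already pending, leave it be; if it
	 * must go out immediately, set the event bit (kicking the work queue
	 * for background requests); otherwise arm, or bring forward, the
	 * delayed-ACK timer, preferring a delay derived from the peer's
	 * smoothed RTT when an estimate is available.
	 */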
	if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
		_debug("already scheduled");
	} else if (immediate || expiry == 0) {
		_debug("immediate ACK %lx", call->events);
		if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events) &&
		    background)
			rxrpc_queue_call(call);
	} else {
		unsigned long now = jiffies, ack_at;

		if (call->peer->srtt_us != 0)
			ack_at = usecs_to_jiffies(call->peer->srtt_us >> 3);
		else
			ack_at = expiry;

		ack_at += READ_ONCE(call->tx_backoff);
		ack_at += now;
		if (time_before(ack_at, call->ack_at)) {
			WRITE_ONCE(call->ack_at, ack_at);
			rxrpc_reduce_call_timer(call, ack_at, now,
						rxrpc_timer_set_for_ack);
		}
	}

trace:
	trace_rxrpc_propose_ack(call, why, ack_reason, serial, immediate,
				background, outcome);
}

/*
 * propose an ACK be sent, locking the call structure
 */
void rxrpc_propose_ACK(struct rxrpc_call *call, u8 ack_reason,
		       u32 serial, bool immediate, bool background,
		       enum rxrpc_propose_ack_trace why)
{
	spin_lock_bh(&call->lock);
	__rxrpc_propose_ACK(call, ack_reason, serial,
			    immediate, background, why);
	spin_unlock_bh(&call->lock);
}

/*
 * Handle congestion being detected by the retransmit timeout.
 */
static void rxrpc_congestion_timeout(struct rxrpc_call *call)
{
	set_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags);
}

/*
 * Perform retransmission of NAK'd and unack'd packets.
 */
static void rxrpc_resend(struct rxrpc_call *call, unsigned long now_j)
{
	struct sk_buff *skb;
	unsigned long resend_at, rto_j;
	rxrpc_seq_t cursor, seq, top;
	ktime_t now, max_age, oldest, ack_ts;
	int ix;
	u8 annotation, anno_type, retrans = 0, unacked = 0;

	_enter("{%d,%d}", call->tx_hard_ack, call->tx_top);

	rto_j = call->peer->rto_j;

	now = ktime_get_real();
	max_age = ktime_sub(now, jiffies_to_usecs(rto_j));

	spin_lock_bh(&call->lock);

	cursor = call->tx_hard_ack;
	top = call->tx_top;
	ASSERT(before_eq(cursor, top));
	if (cursor == top)
		goto out_unlock;

	/* Scan the packet list without dropping the lock and decide which of
	 * the packets in the Tx buffer we're going to resend and what the new
	 * resend timeout will be.
	 */
	trace_rxrpc_resend(call, (cursor + 1) & RXRPC_RXTX_BUFF_MASK);
	oldest = now;
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		annotation &= ~RXRPC_TX_ANNO_MASK;
		if (anno_type == RXRPC_TX_ANNO_ACK)
			continue;

		skb = call->rxtx_buffer[ix];
		rxrpc_see_skb(skb, rxrpc_skb_seen);

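		/* A packet still awaiting its first ACK is only retransmitted
		 * once its transmission timestamp has fallen outside the RTO
		 * window; younger packets just feed into the calculation of
		 * when the resend timer should next fire.
		 */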
		if (anno_type == RXRPC_TX_ANNO_UNACK) {
			if (ktime_after(skb->tstamp, max_age)) {
				if (ktime_before(skb->tstamp, oldest))
					oldest = skb->tstamp;
				continue;
			}
			if (!(annotation & RXRPC_TX_ANNO_RESENT))
				unacked++;
		}

		/* Okay, we need to retransmit a packet. */
		call->rxtx_annotations[ix] = RXRPC_TX_ANNO_RETRANS | annotation;
		retrans++;
		trace_rxrpc_retransmit(call, seq, annotation | anno_type,
				       ktime_to_ns(ktime_sub(skb->tstamp, max_age)));
	}

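	/* Work out when the resend timer should next fire, based on the
	 * timestamp of the oldest packet that is still within its RTO window
	 * and the current RTO estimate.
	 */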
	resend_at = nsecs_to_jiffies(ktime_to_ns(ktime_sub(now, oldest)));
	resend_at += jiffies + rto_j;
	WRITE_ONCE(call->resend_at, resend_at);

	if (unacked)
		rxrpc_congestion_timeout(call);

	/* If there was nothing that needed retransmission then it's likely
	 * that an ACK got lost somewhere. Send a ping to find out instead of
	 * retransmitting data.
	 */
	if (!retrans) {
		rxrpc_reduce_call_timer(call, resend_at, now_j,
					rxrpc_timer_set_for_resend);
		spin_unlock_bh(&call->lock);
		ack_ts = ktime_sub(now, call->acks_latest_ts);
		if (ktime_to_us(ack_ts) < (call->peer->srtt_us >> 3))
			goto out;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		rxrpc_send_ack_packet(call, true, NULL);
		goto out;
	}

	/* Now go through the Tx window and perform the retransmissions. We
	 * have to drop the lock for each send. If an ACK comes in whilst the
	 * lock is dropped, it may clear some of the retransmission markers for
	 * packets that it soft-ACKs.
	 */
	for (seq = cursor + 1; before_eq(seq, top); seq++) {
		ix = seq & RXRPC_RXTX_BUFF_MASK;
		annotation = call->rxtx_annotations[ix];
		anno_type = annotation & RXRPC_TX_ANNO_MASK;
		if (anno_type != RXRPC_TX_ANNO_RETRANS)
			continue;

		/* We need to reset the retransmission state, but we need to do
		 * so before we drop the lock as a new ACK/NAK may come in and
		 * confuse things
		 */
		annotation &= ~RXRPC_TX_ANNO_MASK;
		annotation |= RXRPC_TX_ANNO_UNACK | RXRPC_TX_ANNO_RESENT;
		call->rxtx_annotations[ix] = annotation;

		skb = call->rxtx_buffer[ix];
		if (!skb)
			continue;

		rxrpc_get_skb(skb, rxrpc_skb_got);
		spin_unlock_bh(&call->lock);

		if (rxrpc_send_data_packet(call, skb, true) < 0) {
			rxrpc_free_skb(skb, rxrpc_skb_freed);
			return;
		}

		if (rxrpc_is_client_call(call))
			rxrpc_expose_client_call(call);

		rxrpc_free_skb(skb, rxrpc_skb_freed);
		spin_lock_bh(&call->lock);
		if (after(call->tx_hard_ack, seq))
			seq = call->tx_hard_ack;
	}

out_unlock:
	spin_unlock_bh(&call->lock);
out:
	_leave("");
}

/*
 * Handle retransmission and deferred ACK/abort generation.
 */
void rxrpc_process_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, processor);
	rxrpc_serial_t *send_ack;
	unsigned long now, next, t;
	unsigned int iterations = 0;

	rxrpc_see_call(call);

	//printk("\n--------------------\n");
	_enter("{%d,%s,%lx}",
	       call->debug_id, rxrpc_call_states[call->state], call->events);

recheck_state:
	/* Limit the number of times we do this before returning to the manager */
	iterations++;
	if (iterations > 5)
		goto requeue;

	if (test_and_clear_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
		rxrpc_send_abort_packet(call);
		goto recheck_state;
	}

	if (call->state == RXRPC_CALL_COMPLETE) {
		del_timer_sync(&call->timer);
		goto out_put;
	}

	/* Work out if any timeouts tripped */
	now = jiffies;
	t = READ_ONCE(call->expect_rx_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_normal, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_req_by);
	if (call->state == RXRPC_CALL_SERVER_RECV_REQUEST &&
	    time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_idle, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

	t = READ_ONCE(call->expect_term_by);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_hard, now);
		set_bit(RXRPC_CALL_EV_EXPIRED, &call->events);
	}

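	/* For each of the soft timers below, an expired timeout is parked a
	 * long way into the future with cmpxchg() so that it doesn't trip
	 * again straight away, and the corresponding event bit is set for
	 * processing further down.
	 */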
	t = READ_ONCE(call->ack_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ack, now);
		cmpxchg(&call->ack_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK, &call->events);
	}

	t = READ_ONCE(call->ack_lost_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_lost_ack, now);
		cmpxchg(&call->ack_lost_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_ACK_LOST, &call->events);
	}

	t = READ_ONCE(call->keepalive_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_keepalive, now);
		cmpxchg(&call->keepalive_at, t, now + MAX_JIFFY_OFFSET);
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, true,
				  rxrpc_propose_ack_ping_for_keepalive);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->ping_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_ping, now);
		cmpxchg(&call->ping_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_PING, &call->events);
	}

	t = READ_ONCE(call->resend_at);
	if (time_after_eq(now, t)) {
		trace_rxrpc_timer(call, rxrpc_timer_exp_resend, now);
		cmpxchg(&call->resend_at, t, now + MAX_JIFFY_OFFSET);
		set_bit(RXRPC_CALL_EV_RESEND, &call->events);
	}

	/* Process events */
	if (test_and_clear_bit(RXRPC_CALL_EV_EXPIRED, &call->events)) {
		if (test_bit(RXRPC_CALL_RX_HEARD, &call->flags) &&
		    (int)call->conn->hi_serial - (int)call->rx_serial > 0) {
			trace_rxrpc_call_reset(call);
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ECONNRESET);
		} else {
			rxrpc_abort_call("EXP", call, 0, RX_USER_ABORT, -ETIME);
		}
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		goto recheck_state;
	}

	send_ack = NULL;
	if (test_and_clear_bit(RXRPC_CALL_EV_ACK_LOST, &call->events)) {
		call->acks_lost_top = call->tx_top;
		rxrpc_propose_ACK(call, RXRPC_ACK_PING, 0, true, false,
				  rxrpc_propose_ack_ping_for_lost_ack);
		send_ack = &call->acks_lost_ping;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_ACK, &call->events) ||
	    send_ack) {
		if (call->ackr_reason) {
			rxrpc_send_ack_packet(call, false, send_ack);
			goto recheck_state;
		}
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_PING, &call->events)) {
		rxrpc_send_ack_packet(call, true, NULL);
		goto recheck_state;
	}

	if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events)) {
		rxrpc_resend(call, now);
		goto recheck_state;
	}

	/* Make sure the timer is restarted */
	next = call->expect_rx_by;

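	/* The set() helper below reduces "next" to the earliest of the
	 * remaining timeouts so that the call timer can be re-armed to cover
	 * whichever timeout comes due first.
	 */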
#define set(T) { t = READ_ONCE(T); if (time_before(t, next)) next = t; }

	set(call->expect_req_by);
	set(call->expect_term_by);
	set(call->ack_at);
	set(call->ack_lost_at);
	set(call->resend_at);
	set(call->keepalive_at);
	set(call->ping_at);

	now = jiffies;
	if (time_after_eq(now, next))
		goto recheck_state;

	rxrpc_reduce_call_timer(call, next, now, rxrpc_timer_restart);

	/* other events may have been raised since we started checking */
	if (call->events && call->state < RXRPC_CALL_COMPLETE)
		goto requeue;

out_put:
	rxrpc_put_call(call, rxrpc_call_put);
out:
	_leave("");
	return;

requeue:
	__rxrpc_queue_call(call);
	goto out;
}