// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Socket Closing - normal and abnormal
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>

#include "smc.h"
#include "smc_tx.h"
#include "smc_cdc.h"
#include "smc_close.h"

#define SMC_CLOSE_WAIT_LISTEN_CLCSOCK_TIME	(5 * HZ)

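/* dequeue and close all not yet accepted child sockets of a closing
 * listen socket
 */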
static void smc_close_cleanup_listen(struct sock *parent)
{
	struct sock *sk;

	/* Close non-accepted connections */
	while ((sk = smc_accept_dequeue(parent, NULL)))
		smc_close_non_accepted(sk);
}

/* wait for sndbuf data being transmitted */
static void smc_close_stream_wait(struct smc_sock *smc, long timeout)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct sock *sk = &smc->sk;

	if (!timeout)
		return;

	if (!smc_tx_prepared_sends(&smc->conn))
		return;

	smc->wait_close_tx_prepared = 1;
	add_wait_queue(sk_sleep(sk), &wait);
	while (!signal_pending(current) && timeout) {
		int rc;

		rc = sk_wait_event(sk, &timeout,
				   !smc_tx_prepared_sends(&smc->conn) ||
				   (sk->sk_err == ECONNABORTED) ||
				   (sk->sk_err == ECONNRESET),
				   &wait);
		if (rc)
			break;
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	smc->wait_close_tx_prepared = 0;
}

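/* wake up a socket closer that waits in smc_close_stream_wait() for
 * prepared sends to complete
 */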
void smc_close_wake_tx_prepared(struct smc_sock *smc)
{
	if (smc->wait_close_tx_prepared)
		/* wake up socket closing */
		smc->sk.sk_state_change(&smc->sk);
}

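/* signal peer_done_writing (write side closed) to the peer via a CDC message */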
static int smc_close_wr(struct smc_connection *conn)
{
	conn->local_tx_ctrl.conn_state_flags.peer_done_writing = 1;

	return smc_cdc_get_slot_and_msg_send(conn);
}

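/* send the closing CDC indication: abort if unread receive data is still
 * pending locally, normal close otherwise
 */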
static int smc_close_final(struct smc_connection *conn)
{
	if (atomic_read(&conn->bytes_to_rcv))
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	else
		conn->local_tx_ctrl.conn_state_flags.peer_conn_closed = 1;

	return smc_cdc_get_slot_and_msg_send(conn);
}

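/* signal peer_conn_abort to the peer via a CDC message */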
static int smc_close_abort(struct smc_connection *conn)
{
	conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;

	return smc_cdc_get_slot_and_msg_send(conn);
}

/* terminate smc socket abnormally - active abort
 * link group is terminated, i.e. RDMA communication no longer possible
 */
static void smc_close_active_abort(struct smc_sock *smc)
{
	struct sock *sk = &smc->sk;

	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;

	sk->sk_err = ECONNABORTED;
	if (smc->clcsock && smc->clcsock->sk) {
		smc->clcsock->sk->sk_err = ECONNABORTED;
		smc->clcsock->sk->sk_state_change(smc->clcsock->sk);
	}
	switch (sk->sk_state) {
	case SMC_INIT:
	case SMC_ACTIVE:
		sk->sk_state = SMC_PEERABORTWAIT;
		release_sock(sk);
		cancel_delayed_work_sync(&smc->conn.tx_work);
		lock_sock(sk);
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(&smc->conn))
			sk->sk_state = SMC_PEERABORTWAIT;
		else
			sk->sk_state = SMC_CLOSED;
		release_sock(sk);
		cancel_delayed_work_sync(&smc->conn.tx_work);
		lock_sock(sk);
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (!txflags->peer_conn_closed) {
			/* just SHUTDOWN_SEND done */
			sk->sk_state = SMC_PEERABORTWAIT;
		} else {
			sk->sk_state = SMC_CLOSED;
		}
		sock_put(sk); /* passive closing */
		break;
	case SMC_PROCESSABORT:
	case SMC_APPFINCLOSEWAIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERFINCLOSEWAIT:
		sock_put(sk); /* passive closing */
		break;
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		break;
	}

	sock_set_flag(sk, SOCK_DEAD);
	sk->sk_state_change(sk);
}

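/* return true if a closing indication (abort or close) was already sent */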
static inline bool smc_close_sent_any_close(struct smc_connection *conn)
{
	return conn->local_tx_ctrl.conn_state_flags.peer_conn_abort ||
	       conn->local_tx_ctrl.conn_state_flags.peer_conn_closed;
}

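/* Active close of an SMC socket: drive the closing state machine for a
 * locally initiated close. Expects the socket lock to be held; it is
 * temporarily released while waiting for pending sends and while
 * cancelling tx_work. Returns 0 or the error from sending the closing
 * CDC message.
 */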
int smc_close_active(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_INIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_LISTEN:
		sk->sk_state = SMC_CLOSED;
		sk->sk_state_change(sk); /* wake up accept */
		if (smc->clcsock && smc->clcsock->sk) {
			rc = kernel_sock_shutdown(smc->clcsock, SHUT_RDWR);
			/* wake up kernel_accept of smc_tcp_listen_worker */
			smc->clcsock->sk->sk_data_ready(smc->clcsock->sk);
		}
		smc_close_cleanup_listen(sk);
		release_sock(sk);
		flush_work(&smc->tcp_listen_work);
		lock_sock(sk);
		break;
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state == SMC_ACTIVE) {
			/* send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
			sk->sk_state = SMC_PEERCLOSEWAIT1;
		} else {
			/* peer event has changed the state */
			goto again;
		}
		break;
	case SMC_APPFINCLOSEWAIT:
		/* socket already shutdown wr or both (active close) */
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1 &&
		    sk->sk_state != SMC_APPCLOSEWAIT2)
			goto again;
		/* confirm close from peer */
		rc = smc_close_final(conn);
		if (rc)
			break;
		if (smc_cdc_rxed_any_close(conn)) {
			/* peer has closed the socket already */
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* postponed passive closing */
		} else {
			/* peer has just issued a shutdown write */
			sk->sk_state = SMC_PEERFINCLOSEWAIT;
		}
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(conn)) {
			/* just shutdown wr done, send close request */
			rc = smc_close_final(conn);
			if (rc)
				break;
		}
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PEERFINCLOSEWAIT:
		/* peer sending PeerConnectionClosed will cause transition */
		break;
	case SMC_PROCESSABORT:
		smc_close_abort(conn);
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PEERABORTWAIT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}

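/* a peer_conn_abort indication was received: move the socket to the
 * corresponding abort handling state (called from smc_close_passive_work())
 */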
static void smc_close_passive_abort_received(struct smc_sock *smc)
{
	struct smc_cdc_conn_state_flags *txflags =
		&smc->conn.local_tx_ctrl.conn_state_flags;
	struct sock *sk = &smc->sk;

	switch (sk->sk_state) {
	case SMC_INIT:
	case SMC_ACTIVE:
	case SMC_APPCLOSEWAIT1:
		sk->sk_state = SMC_PROCESSABORT;
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPFINCLOSEWAIT:
		sk->sk_state = SMC_PROCESSABORT;
		break;
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
		if (txflags->peer_done_writing &&
		    !smc_close_sent_any_close(&smc->conn))
			/* just shutdown, but not yet closed locally */
			sk->sk_state = SMC_PROCESSABORT;
		else
			sk->sk_state = SMC_CLOSED;
		sock_put(sk); /* passive closing */
		break;
	case SMC_APPCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
		sk->sk_state = SMC_CLOSED;
		sock_put(sk); /* passive closing */
		break;
	case SMC_PEERABORTWAIT:
		sk->sk_state = SMC_CLOSED;
		break;
	case SMC_PROCESSABORT:
		/* nothing to do, add tracing in future patch */
		break;
	}
}

/* Either some kind of closing has been received: peer_conn_closed,
 * peer_conn_abort, or peer_done_writing
 * or the link group of the connection terminates abnormally.
 */
static void smc_close_passive_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(work,
						   struct smc_connection,
						   close_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_cdc_conn_state_flags *rxflags;
	struct sock *sk = &smc->sk;
	int old_state;

	lock_sock(sk);
	old_state = sk->sk_state;

	if (!conn->alert_token_local) {
		/* abnormal termination */
		smc_close_active_abort(smc);
		goto wakeup;
	}

	rxflags = &conn->local_rx_ctrl.conn_state_flags;
	if (rxflags->peer_conn_abort) {
		/* peer has not received all data */
		smc_close_passive_abort_received(smc);
		release_sock(&smc->sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(&smc->sk);
		goto wakeup;
	}

	switch (sk->sk_state) {
	case SMC_INIT:
		if (atomic_read(&conn->bytes_to_rcv) ||
		    (rxflags->peer_done_writing &&
		     !smc_cdc_rxed_any_close(conn))) {
			sk->sk_state = SMC_APPCLOSEWAIT1;
		} else {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* passive closing */
		}
		break;
	case SMC_ACTIVE:
		sk->sk_state = SMC_APPCLOSEWAIT1;
		/* postpone sock_put() for passive closing to cover
		 * received SEND_SHUTDOWN as well
		 */
		break;
	case SMC_PEERCLOSEWAIT1:
		if (rxflags->peer_done_writing)
			sk->sk_state = SMC_PEERCLOSEWAIT2;
		/* fall through */
		/* to check for closing */
	case SMC_PEERCLOSEWAIT2:
		if (!smc_cdc_rxed_any_close(conn))
			break;
		if (sock_flag(sk, SOCK_DEAD) &&
		    smc_close_sent_any_close(conn)) {
			/* smc_release has already been called locally */
			sk->sk_state = SMC_CLOSED;
		} else {
			/* just shutdown, but not yet closed locally */
			sk->sk_state = SMC_APPFINCLOSEWAIT;
		}
		sock_put(sk); /* passive closing */
		break;
	case SMC_PEERFINCLOSEWAIT:
		if (smc_cdc_rxed_any_close(conn)) {
			sk->sk_state = SMC_CLOSED;
			sock_put(sk); /* passive closing */
		}
		break;
	case SMC_APPCLOSEWAIT1:
	case SMC_APPCLOSEWAIT2:
		/* postpone sock_put() for passive closing to cover
		 * received SEND_SHUTDOWN as well
		 */
		break;
	case SMC_APPFINCLOSEWAIT:
	case SMC_PEERABORTWAIT:
	case SMC_PROCESSABORT:
	case SMC_CLOSED:
		/* nothing to do, add tracing in future patch */
		break;
	}

wakeup:
	sk->sk_data_ready(sk); /* wakeup blocked rcvbuf consumers */
	sk->sk_write_space(sk); /* wakeup blocked sndbuf producers */

	if (old_state != sk->sk_state) {
		sk->sk_state_change(sk);
		if ((sk->sk_state == SMC_CLOSED) &&
		    (sock_flag(sk, SOCK_DEAD) || !sk->sk_socket))
			smc_conn_free(conn);
	}
	release_sock(sk);
	sock_put(sk); /* sock_hold done by schedulers of close_work */
}

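/* Active shutdown of the write direction: wait for pending sends, then
 * signal peer_done_writing with smc_close_wr(). Expects the socket lock
 * to be held by the caller; it is temporarily released while waiting.
 */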
int smc_close_shutdown_write(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	int old_state;
	long timeout;
	int rc = 0;

	timeout = current->flags & PF_EXITING ?
		  0 : sock_flag(sk, SOCK_LINGER) ?
		      sk->sk_lingertime : SMC_MAX_STREAM_WAIT_TIMEOUT;

	old_state = sk->sk_state;
again:
	switch (sk->sk_state) {
	case SMC_ACTIVE:
		smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_ACTIVE)
			goto again;
		/* send close wr request */
		rc = smc_close_wr(conn);
		if (rc)
			break;
		sk->sk_state = SMC_PEERCLOSEWAIT1;
		break;
	case SMC_APPCLOSEWAIT1:
		/* passive close */
		if (!smc_cdc_rxed_any_close(conn))
			smc_close_stream_wait(smc, timeout);
		release_sock(sk);
		cancel_delayed_work_sync(&conn->tx_work);
		lock_sock(sk);
		if (sk->sk_state != SMC_APPCLOSEWAIT1)
			goto again;
		/* confirm close from peer */
		rc = smc_close_wr(conn);
		if (rc)
			break;
		sk->sk_state = SMC_APPCLOSEWAIT2;
		break;
	case SMC_APPCLOSEWAIT2:
	case SMC_PEERFINCLOSEWAIT:
	case SMC_PEERCLOSEWAIT1:
	case SMC_PEERCLOSEWAIT2:
	case SMC_APPFINCLOSEWAIT:
	case SMC_PROCESSABORT:
	case SMC_PEERABORTWAIT:
		/* nothing to do, add tracing in future patch */
		break;
	}

	if (old_state != sk->sk_state)
		sk->sk_state_change(sk);
	return rc;
}

/* Initialize close properties on connection establishment. */
void smc_close_init(struct smc_sock *smc)
{
	INIT_WORK(&smc->conn.close_work, smc_close_passive_work);
}