// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 * handles flow control
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/

/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
	struct smc_connection *conn = cdcpend->conn;
	struct smc_sock *smc;
	int diff;

	if (!conn)
		/* already dismissed */
		return;

	smc = container_of(conn, struct smc_sock, conn);
	bh_lock_sock(&smc->sk);
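	/* a wc_status of 0 (IB_WC_SUCCESS) means the CDC msg was sent OK */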
	if (!wc_status) {
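		/* diff = bytes confirmed sent by this completion, i.e. how
		 * far tx_curs_fin may advance towards tx_curs_sent
		 */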
		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
				     &cdcpend->conn->tx_curs_fin,
				     &cdcpend->cursor);
		/* sndbuf_space is decreased in smc_sendmsg */
		smp_mb__before_atomic();
		atomic_add(diff, &cdcpend->conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		smc_curs_copy(&conn->tx_curs_fin, &cdcpend->cursor, conn);
	}
	smc_tx_sndbuf_nonfull(smc);
	bh_unlock_sock(&smc->sk);
}

int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_rdma_wr **wr_rdma_buf,
			  struct smc_cdc_tx_pend **pend)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
				     wr_rdma_buf,
				     (struct smc_wr_tx_pend_priv **)pend);
	if (!conn->alert_token_local)
		/* abnormal termination */
		rc = -EPIPE;
	return rc;
}

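/* remember the send state of a CDC msg, so that the tx completion handler
 * can later advance tx_curs_fin and release sndbuf space accordingly
 */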
static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
					    struct smc_cdc_tx_pend *pend)
{
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		offsetofend(struct smc_cdc_msg, reserved) > SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
	pend->conn = conn;
	pend->cursor = conn->tx_curs_sent;
	pend->p_cursor = conn->local_tx_ctrl.prod;
	pend->ctrl_seq = conn->tx_cdc_seq;
}

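/* build and send a CDC msg on the connection's link; on success, remember in
 * rx_curs_confirmed which consumer cursor was conveyed to the peer
 */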
int smc_cdc_msg_send(struct smc_connection *conn,
		     struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend)
{
	union smc_host_cursor cfed;
	struct smc_link *link;
	int rc;

	link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_cdc_add_pending_send(conn, pend);

	conn->tx_cdc_seq++;
	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
			    &conn->local_tx_ctrl, conn);
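	/* snapshot the consumer cursor that goes out with this msg */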
	smc_curs_copy(&cfed, &((struct smc_host_cdc_msg *)wr_buf)->cons, conn);
	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (!rc)
		smc_curs_copy(&conn->rx_curs_confirmed, &cfed, conn);

	return rc;
}

static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, NULL, &pend);
	if (rc)
		return rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

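/* SMC-D writes the CDC msg directly into the peer's DMB via the ISM device,
 * so no link work request slot is needed; SMC-R must reserve one first
 */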
int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd) {
		spin_lock_bh(&conn->send_lock);
		rc = smcd_cdc_msg_send(conn);
		spin_unlock_bh(&conn->send_lock);
	} else {
		rc = smcr_cdc_get_slot_and_msg_send(conn);
	}

	return rc;
}

static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
			      unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	return cdc_pend->conn == conn;
}

static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
{
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	cdc_pend->conn = NULL;
}

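/* detach all pending CDC msgs of this connection from their tx slots;
 * the tx completion handler then sees conn == NULL and skips the cursor
 * and sndbuf_space updates
 */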
void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
				(unsigned long)conn);
}

/* Send an SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * Also update the confirmed receive buffer with what was sent to the peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	union smc_host_cursor curs;
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
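	/* read each cursor atomically and convert it to wire format */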
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.prod.acurs);
	cdc.prod.wrap = curs.wrap;
	cdc.prod.count = curs.count;
	curs.acurs.counter = atomic64_read(&conn->local_tx_ctrl.cons.acurs);
	cdc.cons.wrap = curs.wrap;
	cdc.cons.count = curs.count;
	cdc.cons.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.cons.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_copy(&conn->rx_curs_confirmed, &curs, conn);
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
	/* increased by confirmed number of bytes */
	smp_mb__before_atomic();
	atomic_add(diff, &conn->sndbuf_space);
	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
	smp_mb__after_atomic();
	smc_curs_copy(&conn->tx_curs_fin, &conn->tx_curs_sent, conn);

	smc_tx_sndbuf_nonfull(smc);
	return rc;
}

/********************************* receive ***********************************/

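/* CDC sequence numbers are 16 bit and wrap; comparing the signed difference
 * yields a correct "before" across the wrap-around
 */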
static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
	return (s16)(seq1 - seq2) < 0;
}

static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_copy(&conn->urg_curs, &conn->local_rx_ctrl.prod, conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
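	/* the urgent byte is the last byte received, i.e. the byte right
	 * before the producer cursor; a count of 0 means the cursor just
	 * wrapped, so the byte sits at the very end of the RMB
	 */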
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	sk_send_sigurg(&smc->sk);
}

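/* apply a received CDC msg to the connection: mirror the peer's producer and
 * consumer cursors, adjust the space accounting, and wake up readers,
 * writers, or the close machinery as required
 */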
static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
	union smc_host_cursor cons_old, prod_old;
	struct smc_connection *conn = &smc->conn;
	int diff_cons, diff_prod;

	smc_curs_copy(&prod_old, &conn->local_rx_ctrl.prod, conn);
	smc_curs_copy(&cons_old, &conn->local_rx_ctrl.cons, conn);
	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

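	/* diff_cons = bytes the peer consumed from its RMB since the last
	 * CDC msg, i.e. space that is available again for our RDMA writes
	 */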
	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
				  &conn->local_rx_ctrl.cons);
	if (diff_cons) {
		/* peer_rmbe_space is decreased during data transfer with RDMA
		 * write
		 */
		smp_mb__before_atomic();
		atomic_add(diff_cons, &conn->peer_rmbe_space);
		/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
		smp_mb__after_atomic();
	}

	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked ||
		    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
		    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
			if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
				conn->urg_state = SMC_URG_NOTYET;
			/* force immediate tx of current consumer cursor, but
			 * under send_lock to guarantee arrival in seqno-order
			 */
			if (smc->sk.sk_state != SMC_INIT)
				smc_tx_sndbuf_nonempty(conn);
		}
	}

	/* piggy backed tx info */
	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if (diff_cons && smc_tx_prepared_sends(conn)) {
		smc_tx_sndbuf_nonempty(conn);
		/* trigger socket release if connection closed */
		smc_close_wake_tx_prepared(smc);
	}
	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		sock_hold(&smc->sk); /* sock_put in close_work */
		if (!schedule_work(&conn->close_work))
			sock_put(&smc->sk);
	}
}

/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
	sock_hold(&smc->sk);
	bh_lock_sock(&smc->sk);
	smc_cdc_msg_recv_action(smc, cdc);
	bh_unlock_sock(&smc->sk);
	sock_put(&smc->sk); /* no free sk in softirq-context */
}

/* Tasklet handler, run when the ISM device IRQ handler signals an update in
 * the DMBE for this connection.
 *
 * Context:
 * - tasklet context
 */
static void smcd_cdc_rx_tsklet(unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smcd_cdc_msg *data_cdc;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn)
		return;

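	/* the peer's CDC msg is placed at the start of our DMB element; take
	 * a snapshot of both cursors before acting on them
	 */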
	data_cdc = (struct smcd_cdc_msg *)conn->rmb_desc->cpu_addr;
	smcd_curs_copy(&cdc.prod, &data_cdc->prod, conn);
	smcd_curs_copy(&cdc.cons, &data_cdc->cons, conn);
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}

/* Initialize receive tasklet. Called from ISM device IRQ handler to start
 * receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
	tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
}

/***************************** init, exit, misc ******************************/

static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_cdc_msg *cdc = buf;
	struct smc_connection *conn;
	struct smc_link_group *lgr;
	struct smc_sock *smc;

	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
		return; /* short message */
	if (cdc->len != SMC_WR_TX_SIZE)
		return; /* invalid message */

	/* lookup connection */
	lgr = smc_get_lgr(link);
	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
	read_unlock_bh(&lgr->conns_lock);
	if (!conn)
		return;
	smc = container_of(conn, struct smc_sock, conn);

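	/* drop msgs with an old seqno, except failover validation msgs */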
	if (!cdc->prod_flags.failover_validation) {
		if (smc_cdc_before(ntohs(cdc->seqno),
				   conn->local_rx_ctrl.seqno))
			/* received seqno is old */
			return;
	}
	smc_cdc_msg_recv(smc, cdc);
}

static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
	{
		.handler	= smc_cdc_rx_handler,
		.type		= SMC_CDC_MSG_TYPE
	},
	{
		.handler	= NULL,
	}
};

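/* register the CDC msg type with the generic smc_wr receive dispatcher */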
int __init smc_cdc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}