// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Connection Data Control (CDC)
 * handles flow control
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/spinlock.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"
#include "smc_rx.h"
#include "smc_close.h"

/********************************** send *************************************/

struct smc_cdc_tx_pend {
	struct smc_connection	*conn;		/* socket connection */
	union smc_host_cursor	cursor;		/* tx sndbuf cursor sent */
	union smc_host_cursor	p_cursor;	/* rx RMBE cursor produced */
	u16			ctrl_seq;	/* conn. tx sequence # */
};

/* handler for send/transmission completion of a CDC msg */
static void smc_cdc_tx_handler(struct smc_wr_tx_pend_priv *pnd_snd,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	struct smc_cdc_tx_pend *cdcpend = (struct smc_cdc_tx_pend *)pnd_snd;
	struct smc_sock *smc;
	int diff;

	if (!cdcpend->conn)
		/* already dismissed */
		return;

	smc = container_of(cdcpend->conn, struct smc_sock, conn);
	bh_lock_sock(&smc->sk);
	if (!wc_status) {
		diff = smc_curs_diff(cdcpend->conn->sndbuf_desc->len,
				     &cdcpend->conn->tx_curs_fin,
				     &cdcpend->cursor);
		/* sndbuf_space is decreased in smc_sendmsg */
		smp_mb__before_atomic();
		atomic_add(diff, &cdcpend->conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		smc_curs_write(&cdcpend->conn->tx_curs_fin,
			       smc_curs_read(&cdcpend->cursor, cdcpend->conn),
			       cdcpend->conn);
	}
	smc_tx_sndbuf_nonfull(smc);
	bh_unlock_sock(&smc->sk);
}
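
/*
 * Illustrative note (editorial sketch, not from the original source):
 * smc_curs_diff() returns the byte distance between two cursors, taking
 * wrap-around into account. Assuming a 16384-byte send buffer, a
 * completion that moves tx_curs_fin from (wrap=0, count=16000) up to a
 * sent cursor of (wrap=1, count=384) frees (16384 - 16000) + 384 = 768
 * bytes, which is the amount atomic_add()ed back into sndbuf_space above.
 */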

int smc_cdc_get_free_slot(struct smc_connection *conn,
			  struct smc_wr_buf **wr_buf,
			  struct smc_cdc_tx_pend **pend)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_cdc_tx_handler, wr_buf,
				     (struct smc_wr_tx_pend_priv **)pend);
	if (!conn->alert_token_local)
		/* abnormal termination */
		rc = -EPIPE;
	return rc;
}
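
/*
 * Usage sketch (editorial; assumes the typical caller pattern in the
 * smc_tx.c send path): a sender reserves a work-request slot first, then
 * posts the message:
 *
 *	struct smc_cdc_tx_pend *pend;
 *	struct smc_wr_buf *wr_buf;
 *	int rc;
 *
 *	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
 *	if (!rc)
 *		rc = smc_cdc_msg_send(conn, wr_buf, pend);
 *
 * smc_wr_tx_get_free_slot() may wait for a slot to become free; the
 * alert_token_local check above turns a reservation on an already
 * terminated connection into -EPIPE.
 */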

static inline void smc_cdc_add_pending_send(struct smc_connection *conn,
					    struct smc_cdc_tx_pend *pend)
{
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_cdc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_msg) != SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_cdc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_cdc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_cdc_tx_pend)");
	pend->conn = conn;
	pend->cursor = conn->tx_curs_sent;
	pend->p_cursor = conn->local_tx_ctrl.prod;
	pend->ctrl_seq = conn->tx_cdc_seq;
}

int smc_cdc_msg_send(struct smc_connection *conn,
		     struct smc_wr_buf *wr_buf,
		     struct smc_cdc_tx_pend *pend)
{
	struct smc_link *link;
	int rc;

	link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_cdc_add_pending_send(conn, pend);

	conn->tx_cdc_seq++;
	conn->local_tx_ctrl.seqno = conn->tx_cdc_seq;
	smc_host_msg_to_cdc((struct smc_cdc_msg *)wr_buf,
			    &conn->local_tx_ctrl, conn);
	rc = smc_wr_tx_send(link, (struct smc_wr_tx_pend_priv *)pend);
	if (!rc)
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);

	return rc;
}

static int smcr_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc)
		return rc;

	return smc_cdc_msg_send(conn, wr_buf, pend);
}

int smc_cdc_get_slot_and_msg_send(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd) {
		spin_lock_bh(&conn->send_lock);
		rc = smcd_cdc_msg_send(conn);
		spin_unlock_bh(&conn->send_lock);
	} else {
		rc = smcr_cdc_get_slot_and_msg_send(conn);
	}

	return rc;
}
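
/*
 * Note (editorial sketch): this is the common entry point for pushing a
 * control update, e.g. an advanced consumer cursor, to the peer. For
 * SMC-D the message is written synchronously into peer-accessible memory,
 * so send_lock alone keeps updates ordered; for SMC-R a work-request slot
 * must be reserved first, and the receiver restores ordering via the
 * tx_cdc_seq sequence number stamped in smc_cdc_msg_send().
 */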

static bool smc_cdc_tx_filter(struct smc_wr_tx_pend_priv *tx_pend,
			      unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	return cdc_pend->conn == conn;
}

static void smc_cdc_tx_dismisser(struct smc_wr_tx_pend_priv *tx_pend)
{
	struct smc_cdc_tx_pend *cdc_pend =
		(struct smc_cdc_tx_pend *)tx_pend;

	cdc_pend->conn = NULL;
}

void smc_cdc_tx_dismiss_slots(struct smc_connection *conn)
{
	struct smc_link *link = &conn->lgr->lnk[SMC_SINGLE_LINK];

	smc_wr_tx_dismiss_slots(link, SMC_CDC_MSG_TYPE,
				smc_cdc_tx_filter, smc_cdc_tx_dismisser,
				(unsigned long)conn);
}
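
/*
 * Sketch of the dismiss pattern (derived from the callbacks above):
 * smc_wr_tx_dismiss_slots() walks the pending CDC work requests of the
 * link and, wherever smc_cdc_tx_filter() matches this connection, calls
 * smc_cdc_tx_dismisser() to clear pend->conn. A completion that fires
 * later for such a slot hits the "already dismissed" early return in
 * smc_cdc_tx_handler() instead of dereferencing a stale connection.
 */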

/* Send an SMC-D CDC header.
 * This increments the free space available in our send buffer.
 * It also updates the confirmed receive buffer with what was sent to the
 * peer.
 */
int smcd_cdc_msg_send(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smcd_cdc_msg cdc;
	int rc, diff;

	memset(&cdc, 0, sizeof(cdc));
	cdc.common.type = SMC_CDC_MSG_TYPE;
	cdc.prod_wrap = conn->local_tx_ctrl.prod.wrap;
	cdc.prod_count = conn->local_tx_ctrl.prod.count;

	cdc.cons_wrap = conn->local_tx_ctrl.cons.wrap;
	cdc.cons_count = conn->local_tx_ctrl.cons.count;
	cdc.prod_flags = conn->local_tx_ctrl.prod_flags;
	cdc.conn_state_flags = conn->local_tx_ctrl.conn_state_flags;
	rc = smcd_tx_ism_write(conn, &cdc, sizeof(cdc), 0, 1);
	if (rc)
		return rc;
	smc_curs_write(&conn->rx_curs_confirmed,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn), conn);
	/* Calculate transmitted data and increment free send buffer space */
	diff = smc_curs_diff(conn->sndbuf_desc->len, &conn->tx_curs_fin,
			     &conn->tx_curs_sent);
	/* increased by confirmed number of bytes */
	smp_mb__before_atomic();
	atomic_add(diff, &conn->sndbuf_space);
	/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
	smp_mb__after_atomic();
	smc_curs_write(&conn->tx_curs_fin,
		       smc_curs_read(&conn->tx_curs_sent, conn), conn);

	smc_tx_sndbuf_nonfull(smc);
	return rc;
}

/********************************* receive ***********************************/

static inline bool smc_cdc_before(u16 seq1, u16 seq2)
{
	return (s16)(seq1 - seq2) < 0;
}
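
/*
 * Worked example (editorial): smc_cdc_before() is serial-number
 * arithmetic on the 16-bit sequence space, so ordering survives
 * wrap-around. For instance smc_cdc_before(0xfffe, 0x0001) computes
 * 0xfffe - 0x0001 = 0xfffd, which as s16 is -3 < 0: 0xfffe counts as
 * "before" 0x0001 even though it is numerically larger.
 */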

static void smc_cdc_handle_urg_data_arrival(struct smc_sock *smc,
					    int *diff_prod)
{
	struct smc_connection *conn = &smc->conn;
	char *base;

	/* new data included urgent business */
	smc_curs_write(&conn->urg_curs,
		       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
		       conn);
	conn->urg_state = SMC_URG_VALID;
	if (!sock_flag(&smc->sk, SOCK_URGINLINE))
		/* we'll skip the urgent byte, so don't account for it */
		(*diff_prod)--;
	base = (char *)conn->rmb_desc->cpu_addr + conn->rx_off;
	if (conn->urg_curs.count)
		conn->urg_rx_byte = *(base + conn->urg_curs.count - 1);
	else
		conn->urg_rx_byte = *(base + conn->rmb_desc->len - 1);
	sk_send_sigurg(&smc->sk);
}
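
/*
 * Example (editorial sketch): the urgent byte is the last byte the
 * producer cursor moved past. With urg_curs.count == 5 it sits at
 * base + 4; with count == 0 the producer has just wrapped, so it is the
 * final byte of the buffer at base + rmb_desc->len - 1. rx_off skips the
 * CDC header that SMC-D keeps at the start of the RMB (assumed to be 0
 * for SMC-R).
 */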

static void smc_cdc_msg_recv_action(struct smc_sock *smc,
				    struct smc_cdc_msg *cdc)
{
	union smc_host_cursor cons_old, prod_old;
	struct smc_connection *conn = &smc->conn;
	int diff_cons, diff_prod;

	smc_curs_write(&prod_old,
		       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons_old,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);
	smc_cdc_msg_to_host(&conn->local_rx_ctrl, cdc, conn);

	diff_cons = smc_curs_diff(conn->peer_rmbe_size, &cons_old,
				  &conn->local_rx_ctrl.cons);
	if (diff_cons) {
		/* peer_rmbe_space is decreased during data transfer with RDMA
		 * write
		 */
		smp_mb__before_atomic();
		atomic_add(diff_cons, &conn->peer_rmbe_space);
		/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
		smp_mb__after_atomic();
	}

	diff_prod = smc_curs_diff(conn->rmb_desc->len, &prod_old,
				  &conn->local_rx_ctrl.prod);
	if (diff_prod) {
		if (conn->local_rx_ctrl.prod_flags.urg_data_present)
			smc_cdc_handle_urg_data_arrival(smc, &diff_prod);
		/* bytes_to_rcv is decreased in smc_recvmsg */
		smp_mb__before_atomic();
		atomic_add(diff_prod, &conn->bytes_to_rcv);
		/* guarantee 0 <= bytes_to_rcv <= rmb_desc->len */
		smp_mb__after_atomic();
		smc->sk.sk_data_ready(&smc->sk);
	} else {
		if (conn->local_rx_ctrl.prod_flags.write_blocked ||
		    conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
		    conn->local_rx_ctrl.prod_flags.urg_data_pending) {
			if (conn->local_rx_ctrl.prod_flags.urg_data_pending)
				conn->urg_state = SMC_URG_NOTYET;
			/* force immediate tx of current consumer cursor, but
			 * under send_lock to guarantee arrival in seqno-order
			 */
			smc_tx_sndbuf_nonempty(conn);
		}
	}

	/* piggybacked tx info */
	/* trigger sndbuf consumer: RDMA write into peer RMBE and CDC */
	if (diff_cons && smc_tx_prepared_sends(conn)) {
		smc_tx_sndbuf_nonempty(conn);
		/* trigger socket release if connection closed */
		smc_close_wake_tx_prepared(smc);
	}
	if (diff_cons && conn->urg_tx_pend &&
	    atomic_read(&conn->peer_rmbe_space) == conn->peer_rmbe_size) {
		/* urg data confirmed by peer, indicate we're ready for more */
		conn->urg_tx_pend = false;
		smc->sk.sk_write_space(&smc->sk);
	}

	if (conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		smc->sk.sk_err = ECONNRESET;
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	}
	if (smc_cdc_rxed_any_close_or_senddone(conn)) {
		smc->sk.sk_shutdown |= RCV_SHUTDOWN;
		if (smc->clcsock && smc->clcsock->sk)
			smc->clcsock->sk->sk_shutdown |= RCV_SHUTDOWN;
		sock_set_flag(&smc->sk, SOCK_DONE);
		sock_hold(&smc->sk); /* sock_put in close_work */
		if (!schedule_work(&conn->close_work))
			sock_put(&smc->sk);
	}
}
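
/*
 * Summary note (editorial sketch): an incoming CDC message can have three
 * effects, handled above in order: an advanced consumer cursor returns
 * space to the peer-RMBE accounting and may unblock pending sends; an
 * advanced producer cursor makes new data (possibly urgent) readable and
 * wakes the receiver; and connection-state flags propagate aborts and
 * shutdowns, deferring final cleanup to close_work.
 */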

/* called under tasklet context */
static void smc_cdc_msg_recv(struct smc_sock *smc, struct smc_cdc_msg *cdc)
{
	sock_hold(&smc->sk);
	bh_lock_sock(&smc->sk);
	smc_cdc_msg_recv_action(smc, cdc);
	bh_unlock_sock(&smc->sk);
	sock_put(&smc->sk); /* no free sk in softirq-context */
}

/* CDC receive tasklet for this connection. Scheduled from the ISM device
 * IRQ handler to indicate an update in the DMBE.
 *
 * Context:
 * - tasklet context
 */
static void smcd_cdc_rx_tsklet(unsigned long data)
{
	struct smc_connection *conn = (struct smc_connection *)data;
	struct smcd_cdc_msg cdc;
	struct smc_sock *smc;

	if (!conn)
		return;

	memcpy(&cdc, conn->rmb_desc->cpu_addr, sizeof(cdc));
	smc = container_of(conn, struct smc_sock, conn);
	smc_cdc_msg_recv(smc, (struct smc_cdc_msg *)&cdc);
}
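
/*
 * Assumption-labelled note (editorial): for SMC-D the peer appears to
 * write the CDC message into the first sizeof(struct smcd_cdc_msg) bytes
 * of the DMB, with the data area following at conn->rx_off. The tasklet
 * therefore only copies that header out of conn->rmb_desc->cpu_addr and
 * feeds it through the same receive path as an SMC-R CDC message.
 */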

/* Initialize receive tasklet. Called from ISM device IRQ handler to start
 * receiver side.
 */
void smcd_cdc_rx_init(struct smc_connection *conn)
{
	tasklet_init(&conn->rx_tsklet, smcd_cdc_rx_tsklet, (unsigned long)conn);
}

/***************************** init, exit, misc ******************************/

static void smc_cdc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_cdc_msg *cdc = buf;
	struct smc_connection *conn;
	struct smc_link_group *lgr;
	struct smc_sock *smc;

	if (wc->byte_len < offsetof(struct smc_cdc_msg, reserved))
		return; /* short message */
	if (cdc->len != SMC_WR_TX_SIZE)
		return; /* invalid message */

	/* lookup connection */
	lgr = container_of(link, struct smc_link_group, lnk[SMC_SINGLE_LINK]);
	read_lock_bh(&lgr->conns_lock);
	conn = smc_lgr_find_conn(ntohl(cdc->token), lgr);
	read_unlock_bh(&lgr->conns_lock);
	if (!conn)
		return;
	smc = container_of(conn, struct smc_sock, conn);

	if (!cdc->prod_flags.failover_validation) {
		if (smc_cdc_before(ntohs(cdc->seqno),
				   conn->local_rx_ctrl.seqno))
			/* received seqno is old */
			return;
	}
	smc_cdc_msg_recv(smc, cdc);
}

static struct smc_wr_rx_handler smc_cdc_rx_handlers[] = {
	{
		.handler	= smc_cdc_rx_handler,
		.type		= SMC_CDC_MSG_TYPE
	},
	{
		.handler	= NULL,
	}
};

int __init smc_cdc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_cdc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}