// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
#include "smc_stats.h"

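/* jiffies delays for scheduling the tx worker: SMC_TX_WORK_DELAY requeues it
 * immediately, SMC_TX_CORK_DELAY defers RDMA writes for corked sockets
 */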
#define SMC_TX_WORK_DELAY	0
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wakeup sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		if (test_bit(SOCK_NOSPACE, &sock->flags))
			SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wakeup sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space is available
 * or the urgent byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->killed ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
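			/* -ERESTARTSYS if the wait is not time-limited,
			 * -EINTR otherwise
			 */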
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

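/* Illustrative userspace sketch (not part of this file): corking an SMC
 * socket uses the plain TCP_CORK sockopt, which SMC also applies to the
 * internal CLC socket that smc_tx_is_corked() inspects:
 *
 *	int on = 1, off = 0;
 *
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *	send(fd, hdr, hdrlen, 0);	// RDMA write may be deferred
 *	send(fd, body, bodylen, 0);
 *	setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 */
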
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	if (sk->sk_state == SMC_INIT)
		return -ENOTCONN;

	if (len > conn->sndbuf_desc->len)
		SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);

	if (len > conn->peer_rmbe_size)
		SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);

	if (msg->msg_flags & MSG_OOB)
		SMC_STAT_INC(smc, urg_data_cnt);

	while (msg_data_left(msg)) {
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->killed)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
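		/* e.g. (illustrative numbers) with sndbuf_desc->len == 16384
		 * and tx_cnt_prep == 15000, a copylen of 4000 is split into
		 * a 1384 byte chunk at offset 15000 and a 2616 byte chunk
		 * at offset 0
		 */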
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
		     (conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link = conn->lnk;
	int rc;

	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc)
		smcr_link_down_cond_sched(link);
	return rc;
}

/* sndbuf consumer */
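/* Example (illustrative numbers): with peer_rmbe_size == 8192,
 * prod == {wrap 3, count 7000} and len == 2000, smc_curs_add() advances
 * prod to {wrap 4, count 808}; peer_rmbe_space shrinks by len until the
 * peer confirms consumption via a CDC message.
 */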
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	struct smc_link *link = conn->lnk;
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
			/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}

/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
			/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0) {
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		return 0;
	}
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
	/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}

/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_link *link = conn->lnk;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			if (conn->killed)
				return -EPIPE;
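			/* no free work request slot: report success and
			 * retry the send from the tx worker
			 */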
			rc = 0;
			mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (link != conn->lnk) {
		/* link of connection changed, tx_work will restart */
		smc_wr_tx_put_slot(link,
				   (struct smc_wr_tx_pend_priv *)pend);
		rc = -ENOLINK;
		goto out_unlock;
	}
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(link,
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

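/* deliver pending sndbuf data to the peer: via RDMA write plus CDC message
 * for SMC-R, via synchronous ISM write plus CDC message for SMC-D
 */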
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->killed ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		return -EPIPE; /* connection being aborted */
	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		smc_close_wake_tx_prepared(smc);
	}
	return rc;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}

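/* send a consumer cursor update to the peer if the peer requested one, if
 * the caller forces it, or if enough bytes were consumed since the last
 * update to risk stalling a space-starved or write-blocked sender
 */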
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if (conn->killed ||
		    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
			return;
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    !conn->killed) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}