// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv are submitted
 * to the RC SQ or RC RQ (reliably connected send/receive queue) and
 * become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ), we get completion
 * queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;
	struct smc_wr_tx_pend_priv priv;
	u8			compl_requested;
};

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	return !bitmap_empty(link->wr_tx_mask, link->wr_tx_cnt);
}

/* wait till all pending tx work requests on the given link are completed */
void smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	wait_event(link->wr_tx_wait, !smc_wr_is_tx_pend(link));
}

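/* Return the index of the pending send request matching @wr_id, or
 * wr_tx_cnt if no pending slot carries this work request id.
 */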
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

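/* Process one send completion: MR registration completions just update
 * wr_reg_state and wake the waiter; for regular sends, the matching pending
 * slot (or the SMC-Rv2 slot) is copied aside and cleared before the
 * completion handler runs, so the slot can be reused immediately. A bad
 * wc->status schedules link termination.
 */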
static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt) {
		if (link->lgr->smc_version != SMC_V2 ||
		    link->wr_tx_v2_pend->wr_id != wc->wr_id)
			return;
		link->wr_tx_v2_pend->wc_status = wc->status;
		memcpy(&pnd_snd, link->wr_tx_v2_pend, sizeof(pnd_snd));
		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(link->wr_tx_v2_pend, 0,
		       sizeof(*link->wr_tx_v2_pend));
		memset(link->lgr->wr_tx_buf_v2, 0,
		       sizeof(*link->lgr->wr_tx_buf_v2));
	} else {
		link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
		if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
			complete(&link->wr_tx_compl[pnd_snd_idx]);
		memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx],
		       sizeof(pnd_snd));
		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[pnd_snd_idx], 0,
		       sizeof(link->wr_tx_pends[pnd_snd_idx]));
		memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
		       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
		if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
			return;
	}

	if (wc->status) {
		if (link->lgr->smc_version == SMC_V2) {
			memset(link->wr_tx_v2_pend, 0,
			       sizeof(*link->wr_tx_v2_pend));
			memset(link->lgr->wr_tx_buf_v2, 0,
			       sizeof(*link->lgr->wr_tx_buf_v2));
		}
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

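/* Bottom half for the send CQ: drains up to SMC_WR_MAX_POLL_CQE completions
 * per ib_poll_cq() call. After the first pass the CQ notification is
 * re-armed with IB_CQ_REPORT_MISSED_EVENTS, and one more polling round is
 * done to catch completions that arrived in between.
 */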
static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

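/* Reserve a free send slot by atomically setting its bit in wr_tx_mask.
 * Returns 0 with *idx set to the reserved slot, -ENOLINK if the link is no
 * longer sendable, or -EBUSY if all slots are in use.
 */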
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_sendable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 * and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_sendable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

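/* Typical send path (illustrative sketch only, not taken from a caller;
 * message assembly is up to the caller):
 *
 *	rc = smc_wr_tx_get_free_slot(link, handler, &wr_buf, NULL, &pend);
 *	if (rc)
 *		return rc;
 *	// ... assemble the message in *wr_buf ...
 *	rc = smc_wr_tx_send(link, pend);
 *
 * A reserved but unsent slot is returned with smc_wr_tx_put_slot().
 */

/* Reserve the single large SMC-Rv2 send buffer of the link group. While a
 * v2 send is pending, wr_tx_v2_pend->idx equals wr_tx_cnt and further
 * reservations fail with -EBUSY.
 */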
int smc_wr_tx_get_v2_slot(struct smc_link *link,
			  smc_wr_tx_handler handler,
			  struct smc_wr_v2_buf **wr_buf,
			  struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_wr_tx_pend *wr_pend;
	struct ib_send_wr *wr_ib;
	u64 wr_id;

	if (link->wr_tx_v2_pend->idx == link->wr_tx_cnt)
		return -EBUSY;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = link->wr_tx_v2_pend;
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = link->wr_tx_cnt;
	wr_ib = link->wr_tx_v2_ib;
	wr_ib->wr_id = wr_id;
	*wr_buf = link->lgr->wr_tx_buf_v2;
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

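/* Return a reserved send slot without posting it. The slot and its buffer
 * are cleared and the slot's bit in wr_tx_mask is released; returns 1 if a
 * slot was freed, 0 otherwise.
 */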
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	} else if (link->lgr->smc_version == SMC_V2 &&
		   pend->idx == link->wr_tx_cnt) {
		/* Large v2 buffer: clear the pointed-to pend and buffer,
		 * not the pointer variables themselves
		 */
		memset(link->wr_tx_v2_pend, 0,
		       sizeof(*link->wr_tx_v2_pend));
		memset(link->lgr->wr_tx_buf_v2, 0,
		       sizeof(*link->lgr->wr_tx_buf_v2));
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

int smc_wr_tx_v2_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
		      int len)
{
	int rc;

	link->wr_tx_v2_ib->sg_list[0].length = len;
	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	rc = ib_post_send(link->roce_qp, link->wr_tx_v2_ib, NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	u32 pnd_idx;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	pnd_idx = pend->idx;
	init_completion(&link->wr_tx_compl[pnd_idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pnd_idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	atomic_inc(&link->wr_reg_refcnt);
	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (atomic_dec_and_test(&link->wr_reg_refcnt))
		wake_up_all(&link->wr_reg_wait);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

/****************************** receive queue ********************************/

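/* Register a receive handler for a message type; returns -EEXIST if a
 * handler for that type is already registered.
 */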
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}

/* Demultiplex a received message to its handler based on the message type.
 * Relies on smc_wr_rx_hash being completely filled before any IB WRs are
 * posted, and not being modified afterwards, so reading it needs no lock.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

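/* Process a batch of receive completions: successful receives are
 * demultiplexed and the receive WR is reposted; fatal errors (retry
 * exceeded, flush) schedule link termination, other errors just repost.
 */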
static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}

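/* Fill the receive queue by posting one receive WR per RX buffer. */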
int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

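/* Cache the queue pair attributes on the link and derive the usable number
 * of send/receive work requests from the QP capabilities.
 */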
void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

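/* Initialize the scatter-gather entries and work requests for all send and
 * receive buffers of a link; with SMC-Rv2, receive WRs get a second
 * spillover sge and the large v2 send WR is set up as well.
 */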
static void smc_wr_init_sge(struct smc_link *lnk)
{
	int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}

	if (lnk->lgr->smc_version == SMC_V2) {
		lnk->wr_tx_v2_sge->addr = lnk->wr_tx_v2_dma_addr;
		lnk->wr_tx_v2_sge->length = SMC_WR_BUF_V2_SIZE;
		lnk->wr_tx_v2_sge->lkey = lnk->roce_pd->local_dma_lkey;

		lnk->wr_tx_v2_ib->next = NULL;
		lnk->wr_tx_v2_ib->sg_list = lnk->wr_tx_v2_sge;
		lnk->wr_tx_v2_ib->num_sge = 1;
		lnk->wr_tx_v2_ib->opcode = IB_WR_SEND;
		lnk->wr_tx_v2_ib->send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
	}

	/* With SMC-Rv2 there can be messages larger than SMC_WR_TX_SIZE.
	 * Each ib_recv_wr gets 2 sges, the second one is a spillover buffer
	 * shared by all receive WRs. When a larger message arrives, the
	 * content of the first small sge is copied to the beginning of the
	 * larger spillover buffer, allowing easy data mapping.
	 */
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		int x = i * sges_per_buf;

		lnk->wr_rx_sges[x].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE;
		lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey;
		if (lnk->lgr->smc_version == SMC_V2) {
			lnk->wr_rx_sges[x + 1].addr =
				lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE;
			lnk->wr_rx_sges[x + 1].length =
				SMC_WR_BUF_V2_SIZE - SMC_WR_TX_SIZE;
			lnk->wr_rx_sges[x + 1].lkey =
				lnk->roce_pd->local_dma_lkey;
		}
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x];
		lnk->wr_rx_ibs[i].num_sge = sges_per_buf;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}

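/* Wake up all waiters, wait until no send or MR registration is pending
 * any more, then unmap the DMA-mapped buffers of the link.
 */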
void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	smc_wr_wakeup_reg_wait(lnk);
	smc_wr_wakeup_tx_wait(lnk);

	smc_wr_tx_wait_no_pending_sends(lnk);
	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_rx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_v2_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
	if (lnk->wr_tx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_TO_DEVICE);
		lnk->wr_tx_v2_dma_addr = 0;
	}
}

void smc_wr_free_lgr_mem(struct smc_link_group *lgr)
{
	if (lgr->smc_version < SMC_V2)
		return;

	kfree(lgr->wr_rx_buf_v2);
	lgr->wr_rx_buf_v2 = NULL;
	kfree(lgr->wr_tx_buf_v2);
	lgr->wr_tx_buf_v2 = NULL;
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_v2_ib);
	lnk->wr_tx_v2_ib = NULL;
	kfree(lnk->wr_tx_v2_sge);
	lnk->wr_tx_v2_sge = NULL;
	kfree(lnk->wr_tx_v2_pend);
	lnk->wr_tx_v2_pend = NULL;
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	bitmap_free(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr)
{
	if (lgr->smc_version < SMC_V2)
		return 0;

	lgr->wr_rx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL);
	if (!lgr->wr_rx_buf_v2)
		return -ENOMEM;
	lgr->wr_tx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL);
	if (!lgr->wr_tx_buf_v2) {
		kfree(lgr->wr_rx_buf_v2);
		return -ENOMEM;
	}
	return 0;
}

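/* Allocate the per-link arrays: SMC_WR_BUF_CNT send buffers and three times
 * as many receive buffers, plus the matching WR, sge, and bookkeeping
 * arrays; error paths free in reverse order of allocation.
 */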
int smc_wr_alloc_link_mem(struct smc_link *link)
{
	int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 2 : 1;

	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]) * sges_per_buf,
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = bitmap_zalloc(SMC_WR_BUF_CNT, GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;

	if (link->lgr->smc_version == SMC_V2) {
		link->wr_tx_v2_ib = kzalloc(sizeof(*link->wr_tx_v2_ib),
					    GFP_KERNEL);
		if (!link->wr_tx_v2_ib)
			goto no_mem_tx_compl;
		link->wr_tx_v2_sge = kzalloc(sizeof(*link->wr_tx_v2_sge),
					     GFP_KERNEL);
		if (!link->wr_tx_v2_sge)
			goto no_mem_v2_ib;
		link->wr_tx_v2_pend = kzalloc(sizeof(*link->wr_tx_v2_pend),
					      GFP_KERNEL);
		if (!link->wr_tx_v2_pend)
			goto no_mem_v2_sge;
	}
	return 0;

no_mem_v2_sge:
	kfree(link->wr_tx_v2_sge);
no_mem_v2_ib:
	kfree(link->wr_tx_v2_ib);
no_mem_tx_compl:
	kfree(link->wr_tx_compl);
no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_setup(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn);
	tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn);
}

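/* Map the link's send/receive buffers for DMA (plus the v2 buffers of the
 * link group for SMC-Rv2), initialize the sges, and reset the send
 * bookkeeping state of the link.
 */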
int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	if (lnk->lgr->smc_version == SMC_V2) {
		lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev,
			lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE,
			DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) {
			lnk->wr_rx_v2_dma_addr = 0;
			rc = -EIO;
			goto dma_unmap;
		}
		lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev,
			lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE,
			DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, lnk->wr_tx_v2_dma_addr)) {
			lnk->wr_tx_v2_dma_addr = 0;
			rc = -EIO;
			goto dma_unmap;
		}
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	bitmap_zero(lnk->wr_tx_mask, SMC_WR_BUF_CNT);
	init_waitqueue_head(&lnk->wr_tx_wait);
	atomic_set(&lnk->wr_tx_refcnt, 0);
	init_waitqueue_head(&lnk->wr_reg_wait);
	atomic_set(&lnk->wr_reg_refcnt, 0);
	return rc;

dma_unmap:
	if (lnk->wr_rx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_v2_dma_addr = 0;
	}
	if (lnk->wr_tx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_TO_DEVICE);
		lnk->wr_tx_v2_dma_addr = 0;
	}
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}