// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting InfiniBand API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv are submitted
 * to the RC send queue (SQ) or RC receive queue (RQ), respectively, of the
 * reliably connected queue pair and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ), we get completion queue
 * entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */
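
/* Typical send-side usage, as an illustrative sketch only (the real callers
 * live in smc_cdc.c and smc_llc.c and add their own bookkeeping):
 *
 *	struct smc_wr_tx_pend_priv *priv;
 *	struct smc_wr_buf *wr_buf;
 *	int rc;
 *
 *	rc = smc_wr_tx_get_free_slot(link, handler, &wr_buf, NULL, &priv);
 *	if (rc)
 *		return rc;
 *	// ... assemble the message in *wr_buf ...
 *	rc = smc_wr_tx_send(link, priv);
 *	// or smc_wr_tx_put_slot(link, priv) to back out without sending
 */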

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64 wr_id;		/* work request id sent */
	smc_wr_tx_handler handler;
	enum ib_wc_status wc_status;	/* CQE status */
	struct smc_link *link;
	u32 idx;
	struct smc_wr_tx_pend_priv priv;
	u8 compl_requested;
};

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
							link->wr_tx_cnt) {
		return true;
	}
	return false;
}

/* wait till all pending tx work requests on the given link are completed */
int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
			       SMC_WR_TX_WAIT_PENDING_TIME))
		return 0;
	else /* timeout */
		return -EPIPE;
}

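/* look up the pending-send slot that matches a completed work request id;
 * returns link->wr_tx_cnt if no slot matches
 */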
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

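/* handle one send-CQ completion: wake up a waiting memory registration, or
 * copy the matching pending-send descriptor aside, clear its slot and invoke
 * its completion handler; on error, drain all pending slots and schedule
 * link teardown
 */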
static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;
	int i;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt) {
		if (link->lgr->smc_version != SMC_V2 ||
		    link->wr_tx_v2_pend->wr_id != wc->wr_id)
			return;
		link->wr_tx_v2_pend->wc_status = wc->status;
		memcpy(&pnd_snd, link->wr_tx_v2_pend, sizeof(pnd_snd));
		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(link->wr_tx_v2_pend, 0,
		       sizeof(*link->wr_tx_v2_pend));
		memset(link->lgr->wr_tx_buf_v2, 0,
		       sizeof(*link->lgr->wr_tx_buf_v2));
	} else {
		link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
		if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
			complete(&link->wr_tx_compl[pnd_snd_idx]);
		memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx],
		       sizeof(pnd_snd));
		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[pnd_snd_idx], 0,
		       sizeof(link->wr_tx_pends[pnd_snd_idx]));
		memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
		       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
		if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
			return;
	}

	if (wc->status) {
		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
			/* clear full struct smc_wr_tx_pend including .priv */
			memset(&link->wr_tx_pends[i], 0,
			       sizeof(link->wr_tx_pends[i]));
			memset(&link->wr_tx_bufs[i], 0,
			       sizeof(link->wr_tx_bufs[i]));
			clear_bit(i, link->wr_tx_mask);
		}
		if (link->lgr->smc_version == SMC_V2) {
			memset(link->wr_tx_v2_pend, 0,
			       sizeof(*link->wr_tx_v2_pend));
			memset(link->lgr->wr_tx_buf_v2, 0,
			       sizeof(*link->lgr->wr_tx_buf_v2));
		}
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

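/* tasklet bottom half for the send CQ: polls up to SMC_WR_MAX_POLL_CQE
 * completions per ib_poll_cq() call, re-arms the CQ on the first pass and
 * then polls one more full round, so completions that slip in between the
 * final poll and the re-arm are not lost
 */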
static void smc_wr_tx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, send_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_usable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_usable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

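/* reserve the single, large SMC-Rv2 send buffer of the link group; the slot
 * is marked busy by setting wr_tx_v2_pend->idx to link->wr_tx_cnt, so this
 * returns -EBUSY while a v2 send is still outstanding
 */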
int smc_wr_tx_get_v2_slot(struct smc_link *link,
			  smc_wr_tx_handler handler,
			  struct smc_wr_v2_buf **wr_buf,
			  struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_wr_tx_pend *wr_pend;
	struct ib_send_wr *wr_ib;
	u64 wr_id;

	if (link->wr_tx_v2_pend->idx == link->wr_tx_cnt)
		return -EBUSY;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = link->wr_tx_v2_pend;
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = link->wr_tx_cnt;
	wr_ib = link->wr_tx_v2_ib;
	wr_ib->wr_id = wr_id;
	*wr_buf = link->lgr->wr_tx_buf_v2;
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

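/* return a reserved send slot without posting it, e.g. when message assembly
 * fails after smc_wr_tx_get_free_slot(); returns 1 if a slot was freed,
 * 0 otherwise
 */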
int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	} else if (link->lgr->smc_version == SMC_V2 &&
		   pend->idx == link->wr_tx_cnt) {
		/* large v2 buffer: clear the pointed-to pend descriptor and
		 * send buffer, not the pointers themselves
		 */
		memset(link->wr_tx_v2_pend, 0,
		       sizeof(*link->wr_tx_v2_pend));
		memset(link->lgr->wr_tx_buf_v2, 0,
		       sizeof(*link->lgr->wr_tx_buf_v2));
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

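/* Send the prepared SMC-Rv2 WR slot via ib_post_send, patching the actual
 * message length into the single scatter-gather entry first.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */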
int smc_wr_tx_v2_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
		      int len)
{
	int rc;

	link->wr_tx_v2_ib->sg_list[0].length = len;
	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	rc = ib_post_send(link->roce_qp, link->wr_tx_v2_ib, NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	u32 pnd_idx;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	pnd_idx = pend->idx;
	init_completion(&link->wr_tx_compl[pnd_idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pnd_idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	atomic_inc(&link->wr_reg_refcnt);
	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (atomic_dec_and_test(&link->wr_reg_refcnt))
		wake_up_all(&link->wr_reg_wait);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

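/* walk all in-use send slots of a link; slots whose buffer carries the given
 * LLC/CDC header type and that pass @filter are handed to @dismisser,
 * typically during connection or link teardown
 */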
void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data)
{
	struct smc_wr_tx_pend_priv *tx_pend;
	struct smc_wr_rx_hdr *wr_tx;
	int i;

	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
		if (wr_tx->type != wr_tx_hdr_type)
			continue;
		tx_pend = &link->wr_tx_pends[i].priv;
		if (filter(tx_pend, data))
			dismisser(tx_pend);
	}
}

/****************************** receive queue ********************************/

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

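/* process receive completions: on success, timestamp the link, demultiplex
 * the message to its handler and repost the receive buffer; retry/flush
 * errors schedule link teardown, all other errors just repost the buffer
 */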
static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

static void smc_wr_rx_tasklet_fn(struct tasklet_struct *t)
{
	struct smc_ib_device *dev = from_tasklet(dev, t, recv_tasklet);
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}

int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

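/* query the QP and cache its attributes; clamp the number of usable send and
 * receive work requests to what the device's QP actually supports
 */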
void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

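/* initialize the scatter-gather entries and work requests for all send and
 * receive buffers of a link, including the memory registration WR
 */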
static void smc_wr_init_sge(struct smc_link *lnk)
{
	int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}

	if (lnk->lgr->smc_version == SMC_V2) {
		lnk->wr_tx_v2_sge->addr = lnk->wr_tx_v2_dma_addr;
		lnk->wr_tx_v2_sge->length = SMC_WR_BUF_V2_SIZE;
		lnk->wr_tx_v2_sge->lkey = lnk->roce_pd->local_dma_lkey;

		lnk->wr_tx_v2_ib->next = NULL;
		lnk->wr_tx_v2_ib->sg_list = lnk->wr_tx_v2_sge;
		lnk->wr_tx_v2_ib->num_sge = 1;
		lnk->wr_tx_v2_ib->opcode = IB_WR_SEND;
		lnk->wr_tx_v2_ib->send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
	}

	/* With SMC-Rv2 there can be messages larger than SMC_WR_TX_SIZE.
	 * Each ib_recv_wr gets 2 sges; the second one is a spillover buffer
	 * that is shared by all receive WRs. When a larger message arrives,
	 * the content of the first small sge is copied to the beginning of
	 * the larger spillover buffer, allowing easy data mapping.
	 */
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		int x = i * sges_per_buf;

		lnk->wr_rx_sges[x].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[x].length = SMC_WR_TX_SIZE;
		lnk->wr_rx_sges[x].lkey = lnk->roce_pd->local_dma_lkey;
		if (lnk->lgr->smc_version == SMC_V2) {
			lnk->wr_rx_sges[x + 1].addr =
				lnk->wr_rx_v2_dma_addr + SMC_WR_TX_SIZE;
			lnk->wr_rx_sges[x + 1].length =
				SMC_WR_BUF_V2_SIZE - SMC_WR_TX_SIZE;
			lnk->wr_rx_sges[x + 1].lkey =
				lnk->roce_pd->local_dma_lkey;
		}
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[x];
		lnk->wr_rx_ibs[i].num_sge = sges_per_buf;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}

void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	smc_wr_wakeup_reg_wait(lnk);
	smc_wr_wakeup_tx_wait(lnk);

	if (smc_wr_tx_wait_no_pending_sends(lnk))
		memset(lnk->wr_tx_mask, 0,
		       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
						sizeof(*lnk->wr_tx_mask));
	wait_event(lnk->wr_reg_wait, (!atomic_read(&lnk->wr_reg_refcnt)));
	wait_event(lnk->wr_tx_wait, (!atomic_read(&lnk->wr_tx_refcnt)));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_rx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_v2_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
	if (lnk->wr_tx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_TO_DEVICE);
		lnk->wr_tx_v2_dma_addr = 0;
	}
}

void smc_wr_free_lgr_mem(struct smc_link_group *lgr)
{
	if (lgr->smc_version < SMC_V2)
		return;

	kfree(lgr->wr_rx_buf_v2);
	lgr->wr_rx_buf_v2 = NULL;
	kfree(lgr->wr_tx_buf_v2);
	lgr->wr_tx_buf_v2 = NULL;
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_v2_ib);
	lnk->wr_tx_v2_ib = NULL;
	kfree(lnk->wr_tx_v2_sge);
	lnk->wr_tx_v2_sge = NULL;
	kfree(lnk->wr_tx_v2_pend);
	lnk->wr_tx_v2_pend = NULL;
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

int smc_wr_alloc_lgr_mem(struct smc_link_group *lgr)
{
	if (lgr->smc_version < SMC_V2)
		return 0;

	lgr->wr_rx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL);
	if (!lgr->wr_rx_buf_v2)
		return -ENOMEM;
	lgr->wr_tx_buf_v2 = kzalloc(SMC_WR_BUF_V2_SIZE, GFP_KERNEL);
	if (!lgr->wr_tx_buf_v2) {
		kfree(lgr->wr_rx_buf_v2);
		return -ENOMEM;
	}
	return 0;
}

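/* allocate all per-link send/receive arrays; on failure, unwind the earlier
 * allocations in reverse order via the chained error labels
 */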
int smc_wr_alloc_link_mem(struct smc_link *link)
{
	int sges_per_buf = link->lgr->smc_version == SMC_V2 ? 2 : 1;

	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]) * sges_per_buf,
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;

	if (link->lgr->smc_version == SMC_V2) {
		link->wr_tx_v2_ib = kzalloc(sizeof(*link->wr_tx_v2_ib),
					    GFP_KERNEL);
		if (!link->wr_tx_v2_ib)
			goto no_mem_tx_compl;
		link->wr_tx_v2_sge = kzalloc(sizeof(*link->wr_tx_v2_sge),
					     GFP_KERNEL);
		if (!link->wr_tx_v2_sge)
			goto no_mem_v2_ib;
		link->wr_tx_v2_pend = kzalloc(sizeof(*link->wr_tx_v2_pend),
					      GFP_KERNEL);
		if (!link->wr_tx_v2_pend)
			goto no_mem_v2_sge;
	}
	return 0;

no_mem_v2_sge:
	kfree(link->wr_tx_v2_sge);
no_mem_v2_ib:
	kfree(link->wr_tx_v2_ib);
no_mem_tx_compl:
	kfree(link->wr_tx_compl);
no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_setup(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn);
	tasklet_setup(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn);
}

int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	if (lnk->lgr->smc_version == SMC_V2) {
		lnk->wr_rx_v2_dma_addr = ib_dma_map_single(ibdev,
			lnk->lgr->wr_rx_buf_v2, SMC_WR_BUF_V2_SIZE,
			DMA_FROM_DEVICE);
		if (ib_dma_mapping_error(ibdev, lnk->wr_rx_v2_dma_addr)) {
			lnk->wr_rx_v2_dma_addr = 0;
			rc = -EIO;
			goto dma_unmap;
		}
		lnk->wr_tx_v2_dma_addr = ib_dma_map_single(ibdev,
			lnk->lgr->wr_tx_buf_v2, SMC_WR_BUF_V2_SIZE,
			DMA_TO_DEVICE);
		if (ib_dma_mapping_error(ibdev, lnk->wr_tx_v2_dma_addr)) {
			lnk->wr_tx_v2_dma_addr = 0;
			rc = -EIO;
			goto dma_unmap;
		}
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	atomic_set(&lnk->wr_tx_refcnt, 0);
	init_waitqueue_head(&lnk->wr_reg_wait);
	atomic_set(&lnk->wr_reg_refcnt, 0);
	return rc;

dma_unmap:
	if (lnk->wr_rx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_v2_dma_addr = 0;
	}
	if (lnk->wr_tx_v2_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_v2_dma_addr,
				    SMC_WR_BUF_V2_SIZE,
				    DMA_TO_DEVICE);
		lnk->wr_tx_v2_dma_addr = 0;
	}
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}