// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting InfiniBand API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv are submitted
 * to the RC SQ or RC RQ respectively (reliably connected send/receive
 * queue) and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;
	struct smc_wr_tx_pend_priv priv;
};

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

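/* Find the array index of the pending send tracked under the given wr_id.
 * A plain linear scan is sufficient since wr_tx_cnt is small (at most
 * SMC_WR_BUF_CNT slots). Returns wr_tx_cnt if no pending send matches.
 */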
static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

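/* Handle one send CQE in tasklet context: memory registration completions
 * just update wr_reg_state and wake the waiter; for regular sends, the
 * tracking data is copied out, the slot is released, and the completion
 * handler is invoked. An error status terminates the whole link group.
 */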
static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;
	int i;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		wake_up(&link->wr_reg_wait);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		struct smc_link_group *lgr;

		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
			/* clear full struct smc_wr_tx_pend including .priv */
			memset(&link->wr_tx_pends[i], 0,
			       sizeof(link->wr_tx_pends[i]));
			memset(&link->wr_tx_bufs[i], 0,
			       sizeof(link->wr_tx_bufs[i]));
			clear_bit(i, link->wr_tx_mask);
		}
		/* terminate connections of this link group abnormally */
		lgr = container_of(link, struct smc_link_group,
				   lnk[SMC_SINGLE_LINK]);
		smc_lgr_terminate(lgr);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

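/* Drain the send CQ. After the first pass the CQ notification is re-armed
 * and the queue is polled once more, so a CQE arriving in the window
 * between the final empty poll and re-arming is not lost.
 */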
static void smc_wr_tx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

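/* Claim a free send slot by atomically setting its bit in wr_tx_mask.
 * test_and_set_bit() makes the claim safe against concurrent callers;
 * losing a race simply advances the scan to the next clear bit.
 */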
static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq()) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		struct smc_link_group *lgr;

		lgr = container_of(link, struct smc_link_group,
				   lnk[SMC_SINGLE_LINK]);
		rc = wait_event_timeout(
			link->wr_tx_wait,
			list_empty(&lgr->list) || /* lgr terminated */
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate connections */
			smc_lgr_terminate(lgr);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[pend->idx], 0,
		       sizeof(link->wr_tx_pends[pend->idx]));
		memset(&link->wr_tx_bufs[pend->idx], 0,
		       sizeof(link->wr_tx_bufs[pend->idx]));
		test_and_clear_bit(pend->idx, link->wr_tx_mask);
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

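	/* make sure the send CQ will raise a completion event for this
	 * WR before it is posted
	 */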
	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		struct smc_link_group *lgr =
			container_of(link, struct smc_link_group,
				     lnk[SMC_SINGLE_LINK]);

		smc_wr_tx_put_slot(link, priv);
		smc_lgr_terminate(lgr);
	}
	return rc;
}

/* Register a memory region and wait for result. */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

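	/* post an IB_WR_REG_MR work request and sleep until the send
	 * completion tasklet moves wr_reg_state away from POSTED
	 */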
	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (!rc) {
		/* timeout - terminate connections */
		struct smc_link_group *lgr;

		lgr = container_of(link, struct smc_link_group,
				   lnk[SMC_SINGLE_LINK]);
		smc_lgr_terminate(lgr);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data)
{
	struct smc_wr_tx_pend_priv *tx_pend;
	struct smc_wr_rx_hdr *wr_tx;
	int i;

	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
		if (wr_tx->type != wr_tx_hdr_type)
			continue;
		tx_pend = &link->wr_tx_pends[i].priv;
		if (filter(tx_pend, data))
			dismisser(tx_pend);
	}
}

/****************************** receive queue ********************************/

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
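	/* the receive buffer index is recovered as wr_id modulo wr_rx_cnt;
	 * do_div() divides in place and returns the remainder
	 */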
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

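/* Process a batch of receive CQEs: successful completions are demultiplexed
 * to their message handler and the receive buffer is reposted; fatal error
 * statuses terminate the link group, anything else just refills the RQ.
 */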
static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			struct smc_link_group *lgr;

			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				/* terminate connections of this link group
				 * abnormally
				 */
				lgr = container_of(link, struct smc_link_group,
						   lnk[SMC_SINGLE_LINK]);
				smc_lgr_terminate(lgr);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

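/* Drain the receive CQ; same poll/re-arm/poll-again pattern as the send
 * tasklet, so no CQE can slip through unnotified.
 */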
static void smc_wr_rx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}

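/* Initially fill the receive queue by posting one receive WR per buffer. */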
int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

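/* Cache the QP attributes and clamp the number of send/receive work
 * requests this link will use to what the device actually supports.
 */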
void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

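/* Wire each pre-allocated send/receive buffer to its scatter-gather entry
 * and work request, and prepare the memory-registration WR. Send WRs are
 * signaled so every transmission produces a CQE for completion tracking.
 */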
static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}

void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

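/* Allocate all per-link WR arrays. Three receive buffers are provisioned
 * per send buffer (SMC_WR_BUF_CNT * 3, matching wr_rx_cnt), presumably so
 * the receive queue keeps pace with the peer; any allocation failure
 * unwinds the earlier allocations.
 */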
int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_rx_ibs;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	return 0;

no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
		     (unsigned long)smcibdev);
	tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
		     (unsigned long)smcibdev);
}

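/* Map the send and receive buffer arrays for DMA with a single mapping
 * each, initialize SGEs/WRs, and reset the pending-send bookkeeping.
 */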
int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs, SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs, SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	init_waitqueue_head(&lnk->wr_reg_wait);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}