// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an InfiniBand client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */

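/* move the link's QP from RESET to INIT state: bind it to a port and pkey
 * index and grant local-write and remote-write access, as needed for the
 * RDMA writes into the peer's RMB
 */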
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

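/* move the QP from INIT to RTR (ready-to-receive) state: program the RoCE
 * address vector (GRH with the peer's GID, plus its MAC), the negotiated
 * path MTU, the peer's QP number and its initial receive packet sequence
 * number
 */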
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

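/* move the QP from RTR to RTS (ready-to-send) state: set the send-side
 * timeout and retry counts and the initial send packet sequence number
 */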
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

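/* move the QP back to RESET state, discarding any outstanding work requests */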
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

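/* bring a link's QP into operation: walk it through the
 * RESET -> INIT -> RTR state sequence, post the initial receive buffers and
 * arm the receive CQ; the server side moves the QP on to RTS right away,
 * while the client side does so later via smc_ib_modify_qp_rts()
 */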
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

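/* read the MAC address of the netdev backing GID index 0 of an IB port
 * into the smc_ib_device's mac array
 */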
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
		     (vlan_id && is_vlan_dev(attr->ndev) &&
		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

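/* query and cache the attributes of an IB port; refresh the port's MAC
 * address and, while the local system identifier is still in its reset
 * state and the port is active, derive the system identifier from it
 */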
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smc_port_terminate(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
		}
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			set_bit(port_idx, smcibdev->ports_going_away);
		}
		schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx < SMC_MAX_PORTS) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (ibevent->event == IB_EVENT_PORT_ERR)
				set_bit(port_idx, smcibdev->ports_going_away);
			else if (ibevent->event == IB_EVENT_PORT_ACTIVE)
				clear_bit(port_idx, smcibdev->ports_going_away);
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

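/* allocate a protection domain on the link's IB device; the link's QP and
 * memory regions are created within this PD
 */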
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

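/* handle asynchronous QP events: fatal QP errors mark the affected port as
 * going away and defer the cleanup to the port event worker
 */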
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx < SMC_MAX_PORTS) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			set_bit(port_idx, smcibdev->ports_going_away);
			schedule_work(&smcibdev->port_event_work);
		}
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}

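/* set up per-device resources: one send and one receive completion queue,
 * with the CQE count capped so the CQE array still fits the maximum page
 * allocation order (relevant for mlx5 CQ allocation), and register the
 * device with the work request layer
 */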
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
}

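/* forward declaration: smc_ib_client is defined at the end of this file and
 * referenced by the add/remove callbacks below via ib_set_client_data() and
 * ib_get_client_data()
 */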
static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		smc_pnetid_by_dev_port(ibdev->dev.parent, i,
				       smcibdev->pnetid[i]);
	}
	schedule_work(&smcibdev->port_event_work);
}

/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}
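
/* Usage: the SMC module core (smc_init()/smc_exit() in af_smc.c) calls
 * smc_ib_register_client() once at module load, so that smc_ib_add_dev()
 * runs for every RoCE-capable device already present or hot-added later,
 * and smc_ib_unregister_client() at unload, which triggers
 * smc_ib_remove_dev() for each registered device.
 */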