// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an Infiniband Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#include "smc_netlink.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

u8 local_systemid[SMC_SYSTEMID_LEN];		/* unique system identifier */

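/* move the link's queue pair into the INIT state */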
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

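/* move the link's queue pair into the Ready-To-Receive state, using the
 * peer's GID, MAC, QP number and starting packet sequence number
 */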
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

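/* move the link's queue pair into the Ready-To-Send state */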
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

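/* move the link's queue pair back into the RESET state */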
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

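/* bring a new link's queue pair into operation: INIT and RTR on both sides,
 * RTS on the server side only; also request completion notifications on the
 * receive CQ and post the initial receive buffers
 */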
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

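/* read the MAC address of an ib port from its first GID table entry */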
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
}

bool smc_ib_is_valid_local_systemid(void)
{
	return !is_zero_ether_addr(&local_systemid[2]);
}

static void smc_ib_init_local_systemid(void)
{
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(ndev)) ||
		     (vlan_id && is_vlan_dev(ndev) &&
		      vlan_dev_vlan_id(ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

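/* (re)read and cache the attributes and MAC address of an ib port; derive
 * the local system identifier from the first active port if not yet set
 */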
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!smc_ib_is_valid_local_systemid() &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
		}
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

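/* a device is reported as critical if an SMC-R link group uses one of its
 * links and has no fallback path, i.e. the link group type is SINGLE or
 * ASYMMETRIC_LOCAL
 */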
static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr,
				      struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr;
	bool rc = false;
	int i;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (lgr->is_smcd)
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
			    lgr->lnk[i].smcibdev != smcibdev)
				continue;
			if (lgr->type == SMC_LGR_SINGLE ||
			    lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) {
				rc = true;
				goto out;
			}
		}
	}
out:
	spin_unlock_bh(&smc_lgr->lock);
	return rc;
}

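/* fill the netlink attributes describing one ib port: pnetid and its origin,
 * bound netdev ifindex, port state and the number of links using the port
 */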
static int smc_nl_handle_dev_port(struct sk_buff *skb,
				  struct ib_device *ibdev,
				  struct smc_ib_device *smcibdev,
				  int port)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *port_attrs;
	unsigned char port_state;
	int lnk_count = 0;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port);
	if (!port_attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
		       smcibdev->pnetid_by_user[port]))
		goto errattr;
	memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
			smcibdev->ndev_ifidx[port]))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1))
		goto errattr;
	port_state = smc_ib_port_active(smcibdev, port + 1);
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state))
		goto errattr;
	lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]);
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_LNK_CNT, lnk_count))
		goto errattr;
	nla_nest_end(skb, port_attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, port_attrs);
errout:
	return -EMSGSIZE;
}

static bool smc_nl_handle_pci_values(const struct smc_pci_dev *smc_pci_dev,
				     struct sk_buff *skb)
{
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev->pci_fid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev->pci_pchid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev->pci_vendor))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev->pci_device))
		return false;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev->pci_id))
		return false;
	return true;
}

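/* emit one netlink message describing an SMC-R ib device, including the PCI
 * identifiers of its parent device and the attributes of each valid port
 */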
static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	struct smc_pci_dev smc_pci_dev;
	struct pci_dev *pci_dev;
	unsigned char is_crit;
	struct nlattr *attrs;
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCR);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR);
	if (!attrs)
		goto errout;
	is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev);
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit))
		goto errattr;
	if (smcibdev->ibdev->dev.parent) {
		memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
		pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent);
		smc_set_pci_values(pci_dev, &smc_pci_dev);
		if (!smc_nl_handle_pci_values(&smc_pci_dev, skb))
			goto errattr;
	}
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name);
	if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname))
		goto errattr;
	for (i = 1; i <= SMC_MAX_PORTS; i++) {
		if (!rdma_is_port_valid(smcibdev->ibdev, i))
			continue;
		if (smc_nl_handle_dev_port(skb, smcibdev->ibdev,
					   smcibdev, i - 1))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_ib_device *smcibdev;
	int snum = cb_ctx->pos[0];
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcibdev, &dev_list->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}

int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb);
	return skb->len;
}

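/* handle asynchronous queue pair events; fatal and access errors are
 * forwarded to the port event worker of the owning device
 */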
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
			      buf_slot->sgt[link_idx].sgl,
			      buf_slot->sgt[link_idx].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx)
{
	if (buf_slot->mr_rx[link_idx])
		return 0; /* already done */

	buf_slot->mr_rx[link_idx] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[link_idx])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
		buf_slot->mr_rx[link_idx] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}

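/* set up the send and receive completion queues of an ib device and register
 * it with the work request layer; done only once per device
 */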
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	mutex_lock(&smcibdev->mutex);
	rc = 0;
	if (smcibdev->initialized)
		goto out;
	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	goto out;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
out:
	mutex_unlock(&smcibdev->mutex);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	mutex_lock(&smcibdev->mutex);
	if (!smcibdev->initialized)
		goto out;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
out:
	mutex_unlock(&smcibdev->mutex);
}

static struct ib_client smc_ib_client;

static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
{
	struct ib_device *ibdev = smcibdev->ibdev;
	struct net_device *ndev;

	if (!ibdev->ops.get_netdev)
		return;
	ndev = ibdev->ops.get_netdev(ibdev, port + 1);
	if (ndev) {
		smcibdev->ndev_ifidx[port] = ndev->ifindex;
		dev_put(ndev);
	}
}

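/* update the cached netdev ifindex of all ib ports bound to the given
 * net_device when it is registered or unregistered
 */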
void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
{
	struct smc_ib_device *smcibdev;
	struct ib_device *libdev;
	struct net_device *lndev;
	u8 port_cnt;
	int i;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		port_cnt = smcibdev->ibdev->phys_port_cnt;
		for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
			libdev = smcibdev->ibdev;
			if (!libdev->ops.get_netdev)
				continue;
			lndev = libdev->ops.get_netdev(libdev, i + 1);
			if (lndev)
				dev_put(lndev);
			if (lndev != ndev)
				continue;
			if (event == NETDEV_REGISTER)
				smcibdev->ndev_ifidx[i] = ndev->ifindex;
			if (event == NETDEV_UNREGISTER)
				smcibdev->ndev_ifidx[i] = 0;
		}
	}
	mutex_unlock(&smc_ib_devices.mutex);
}

/* callback function for ib_register_client() */
static int smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return -EOPNOTSUPP;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return -ENOMEM;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	mutex_init(&smcibdev->mutex);
	mutex_lock(&smc_ib_devices.mutex);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	mutex_unlock(&smc_ib_devices.mutex);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	pr_warn_ratelimited("smc: adding ib device %s with port count %d\n",
			    smcibdev->ibdev->name, port_cnt);
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
		smc_copy_netdev_ifindex(smcibdev, i);
		pr_warn_ratelimited("smc: ib device %s port %d has pnetid "
				    "%.16s%s\n",
				    smcibdev->ibdev->name, i + 1,
				    smcibdev->pnetid[i],
				    smcibdev->pnetid_by_user[i] ?
				    " (user defined)" :
				    "");
	}
	schedule_work(&smcibdev->port_event_work);
	return 0;
}

/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev = client_data;

	mutex_lock(&smc_ib_devices.mutex);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	mutex_unlock(&smc_ib_devices.mutex);
	pr_warn_ratelimited("smc: removing ib device %s\n",
			    smcibdev->ibdev->name);
	smc_smcr_terminate_all(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	cancel_work_sync(&smcibdev->port_event_work);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	smc_ib_init_local_systemid();
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}