// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an InfiniBand Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#include "smc_netlink.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

u8 local_systemid[SMC_SYSTEMID_LEN];	/* unique system identifier */

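/* move the link's RC QP into the INIT state; the QP is bound to the link's
 * IB port and restricted to local and remote RDMA writes
 */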
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

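/* move the link's QP to RTR (ready to receive): set the path MTU, the RoCE
 * address handle towards the peer, the peer QP number and the starting
 * receive packet sequence number
 */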
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

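/* move the link's QP to RTS (ready to send): set the ack timeout, the retry
 * counts and the starting send packet sequence number
 */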
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

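/* bring a new link's QP through INIT and RTR (and, on the server side, RTS),
 * post the initial receive work requests and arm the receive CQ
 */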
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

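/* read the MAC address belonging to GID index 0 of an IB port and cache it
 * in the smc_ib_device
 */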
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
}

bool smc_ib_is_valid_local_systemid(void)
{
	return !is_zero_ether_addr(&local_systemid[2]);
}

static void smc_ib_init_local_systemid(void)
{
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

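/* find the IPv4 route to the given destination and return the next-hop MAC
 * address plus an indication whether the route goes via a gateway
 */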
int smc_ib_find_route(__be32 saddr, __be32 daddr,
		      u8 nexthop_mac[], u8 *uses_gateway)
{
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	struct flowi4 fl4 = {
		.saddr = saddr,
		.daddr = daddr
	};

	if (daddr == cpu_to_be32(INADDR_NONE))
		goto out;
	rt = ip_route_output_flow(&init_net, &fl4, NULL);
	if (IS_ERR(rt))
		goto out;
	if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
		goto out;
	neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
	if (neigh) {
		memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
		*uses_gateway = rt->rt_uses_gateway;
		return 0;
	}
out:
	return -ENOENT;
}

/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(ndev)) ||
		     (vlan_id && is_vlan_dev(ndev) &&
		      vlan_dev_vlan_id(ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			rcu_read_unlock();
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

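/* (re-)read and cache the attributes and MAC address of an IB port; may also
 * define the local system identifier if none exists yet
 */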
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!smc_ib_is_valid_local_systemid() &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
		}
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

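/* report an IB device as critical if any SMC-R link group uses one of its
 * links while the link group is of type SINGLE or ASYMMETRIC_LOCAL, i.e.
 * losing the device would leave that link group without a usable link
 */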
static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr,
				      struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr;
	bool rc = false;
	int i;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (lgr->is_smcd)
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
			    lgr->lnk[i].smcibdev != smcibdev)
				continue;
			if (lgr->type == SMC_LGR_SINGLE ||
			    lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) {
				rc = true;
				goto out;
			}
		}
	}
out:
	spin_unlock_bh(&smc_lgr->lock);
	return rc;
}

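/* fill one nested netlink attribute with the pnetid, netdev ifindex, state
 * and link count of a single IB port for the SMC-R device dump
 */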
static int smc_nl_handle_dev_port(struct sk_buff *skb,
				  struct ib_device *ibdev,
				  struct smc_ib_device *smcibdev,
				  int port)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *port_attrs;
	unsigned char port_state;
	int lnk_count = 0;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port);
	if (!port_attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
		       smcibdev->pnetid_by_user[port]))
		goto errattr;
	memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
			smcibdev->ndev_ifidx[port]))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1))
		goto errattr;
	port_state = smc_ib_port_active(smcibdev, port + 1);
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state))
		goto errattr;
	lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]);
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_LNK_CNT, lnk_count))
		goto errattr;
	nla_nest_end(skb, port_attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, port_attrs);
errout:
	return -EMSGSIZE;
}

static bool smc_nl_handle_pci_values(const struct smc_pci_dev *smc_pci_dev,
				     struct sk_buff *skb)
{
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev->pci_fid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev->pci_pchid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev->pci_vendor))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev->pci_device))
		return false;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev->pci_id))
		return false;
	return true;
}

static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	struct smc_pci_dev smc_pci_dev;
	struct pci_dev *pci_dev;
	unsigned char is_crit;
	struct nlattr *attrs;
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCR);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR);
	if (!attrs)
		goto errout;
	is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev);
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit))
		goto errattr;
	if (smcibdev->ibdev->dev.parent) {
		memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
		pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent);
		smc_set_pci_values(pci_dev, &smc_pci_dev);
		if (!smc_nl_handle_pci_values(&smc_pci_dev, skb))
			goto errattr;
	}
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name);
	if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname))
		goto errattr;
	for (i = 1; i <= SMC_MAX_PORTS; i++) {
		if (!rdma_is_port_valid(smcibdev->ibdev, i))
			continue;
		if (smc_nl_handle_dev_port(skb, smcibdev->ibdev,
					   smcibdev, i - 1))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_ib_device *smcibdev;
	int snum = cb_ctx->pos[0];
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcibdev, &dev_list->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}

int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb);
	return skb->len;
}

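/* QP event callback: on fatal QP errors mark the affected port as going away
 * and let the port event worker handle the impacted links
 */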
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
			      buf_slot->sgt[link_idx].sgl,
			      buf_slot->sgt[link_idx].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx)
{
	if (buf_slot->mr_rx[link_idx])
		return 0; /* already done */

	buf_slot->mr_rx[link_idx] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[link_idx])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
		buf_slot->mr_rx[link_idx] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}

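/* one-time per-device initialization: create the send and receive completion
 * queues (sized to fit the mlx5 CQ allocation scheme) and register the device
 * with the work request layer
 */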
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr =	{
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	mutex_lock(&smcibdev->mutex);
	rc = 0;
	if (smcibdev->initialized)
		goto out;
	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	goto out;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
out:
	mutex_unlock(&smcibdev->mutex);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	mutex_lock(&smcibdev->mutex);
	if (!smcibdev->initialized)
		goto out;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
out:
	mutex_unlock(&smcibdev->mutex);
}

static struct ib_client smc_ib_client;

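/* cache the ifindex of the net_device behind an IB port, if the IB driver
 * exposes one via the get_netdev operation
 */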
static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
{
	struct ib_device *ibdev = smcibdev->ibdev;
	struct net_device *ndev;

	if (!ibdev->ops.get_netdev)
		return;
	ndev = ibdev->ops.get_netdev(ibdev, port + 1);
	if (ndev) {
		smcibdev->ndev_ifidx[port] = ndev->ifindex;
		dev_put(ndev);
	}
}

void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
{
	struct smc_ib_device *smcibdev;
	struct ib_device *libdev;
	struct net_device *lndev;
	u8 port_cnt;
	int i;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		port_cnt = smcibdev->ibdev->phys_port_cnt;
		for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
			libdev = smcibdev->ibdev;
			if (!libdev->ops.get_netdev)
				continue;
			lndev = libdev->ops.get_netdev(libdev, i + 1);
			dev_put(lndev);
			if (lndev != ndev)
				continue;
			if (event == NETDEV_REGISTER)
				smcibdev->ndev_ifidx[i] = ndev->ifindex;
			if (event == NETDEV_UNREGISTER)
				smcibdev->ndev_ifidx[i] = 0;
		}
	}
	mutex_unlock(&smc_ib_devices.mutex);
}

/* callback function for ib_register_client() */
static int smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return -EOPNOTSUPP;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return -ENOMEM;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	mutex_init(&smcibdev->mutex);
	mutex_lock(&smc_ib_devices.mutex);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	mutex_unlock(&smc_ib_devices.mutex);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	pr_warn_ratelimited("smc: adding ib device %s with port count %d\n",
			    smcibdev->ibdev->name, port_cnt);
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
		smc_copy_netdev_ifindex(smcibdev, i);
		pr_warn_ratelimited("smc: ib device %s port %d has pnetid "
				    "%.16s%s\n",
				    smcibdev->ibdev->name, i + 1,
				    smcibdev->pnetid[i],
				    smcibdev->pnetid_by_user[i] ?
					" (user defined)" :
					"");
	}
	schedule_work(&smcibdev->port_event_work);
	return 0;
}

/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev = client_data;

	mutex_lock(&smc_ib_devices.mutex);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	mutex_unlock(&smc_ib_devices.mutex);
	pr_warn_ratelimited("smc: removing ib device %s\n",
			    smcibdev->ibdev->name);
	smc_smcr_terminate_all(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	cancel_work_sync(&smcibdev->port_event_work);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	smc_ib_init_local_systemid();
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}