// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an InfiniBand Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/inetdevice.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"
#include "smc_netlink.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.mutex = __MUTEX_INITIALIZER(smc_ib_devices.mutex),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

u8 local_systemid[SMC_SYSTEMID_LEN];		/* unique system identifier */

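/* move the link's queue pair into the INIT state and enable local and
 * remote write access on it
 */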
static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

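/* move the link's queue pair into the Ready-To-Receive state; address
 * handle, path MTU, destination QP number and receive PSN are taken from
 * the link. For SMC-Rv2 connections routed through a gateway, the nexthop
 * MAC and an IPv6-style hop limit are used instead of the peer's MAC.
 */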
static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;
	u8 hop_lim = 1;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
		hop_lim = IPV6_DEFAULT_HOPLIMIT;
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, hop_lim, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	if (lnk->lgr->smc_version == SMC_V2 && lnk->lgr->uses_gateway)
		memcpy(&qp_attr.ah_attr.roce.dmac, lnk->lgr->nexthop_mac,
		       sizeof(lnk->lgr->nexthop_mac));
	else
		memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
		       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

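/* move the link's queue pair into the Ready-To-Send state */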
int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}

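/* move the link's queue pair back into the RESET state */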
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

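/* bring up a link: walk the queue pair through INIT and RTR, post the
 * initial receive work requests and, on the server side, move the queue
 * pair to RTS as well
 */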
int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

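/* read the MAC address belonging to GID index 0 of an ib port */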
static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	rc = rdma_read_gid_l2_fields(attr, NULL, smcibdev->mac[ibport - 1]);
	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
}

bool smc_ib_is_valid_local_systemid(void)
{
	return !is_zero_ether_addr(&local_systemid[2]);
}

static void smc_ib_init_local_systemid(void)
{
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

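/* resolve an IPv4 route to the destination and return the nexthop MAC
 * address; *uses_gateway tells whether the path crosses a gateway
 */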
int smc_ib_find_route(__be32 saddr, __be32 daddr,
		      u8 nexthop_mac[], u8 *uses_gateway)
{
	struct neighbour *neigh = NULL;
	struct rtable *rt = NULL;
	struct flowi4 fl4 = {
		.saddr = saddr,
		.daddr = daddr
	};

	if (daddr == cpu_to_be32(INADDR_NONE))
		goto out;
	rt = ip_route_output_flow(&init_net, &fl4, NULL);
	if (IS_ERR(rt))
		goto out;
	if (rt->rt_uses_gateway && rt->rt_gw_family != AF_INET)
		goto out;
	neigh = rt->dst.ops->neigh_lookup(&rt->dst, NULL, &fl4.daddr);
	if (neigh) {
		memcpy(nexthop_mac, neigh->ha, ETH_ALEN);
		*uses_gateway = rt->rt_uses_gateway;
		return 0;
	}
out:
	return -ENOENT;
}

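/* check whether a GID table entry is usable for the requested SMC-R version:
 * SMC-Rv1 needs a RoCEv1 GID, SMC-Rv2 needs a RoCEv2 (UDP encapsulated) GID
 * whose IPv4 address lies in the subnet of the source address and, if a
 * destination is given, for which a route can be resolved.
 * The caller must hold the RCU read lock.
 */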
static int smc_ib_determine_gid_rcu(const struct net_device *ndev,
				    const struct ib_gid_attr *attr,
				    u8 gid[], u8 *sgid_index,
				    struct smc_init_info_smcrv2 *smcrv2)
{
	if (!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) {
		if (gid)
			memcpy(gid, &attr->gid, SMC_GID_SIZE);
		if (sgid_index)
			*sgid_index = attr->index;
		return 0;
	}
	if (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
	    smc_ib_gid_to_ipv4((u8 *)&attr->gid) != cpu_to_be32(INADDR_NONE)) {
		struct in_device *in_dev = __in_dev_get_rcu(ndev);
		const struct in_ifaddr *ifa;
		bool subnet_match = false;

		if (!in_dev)
			goto out;
		in_dev_for_each_ifa_rcu(ifa, in_dev) {
			if (!inet_ifa_match(smcrv2->saddr, ifa))
				continue;
			subnet_match = true;
			break;
		}
		if (!subnet_match)
			goto out;
		if (smcrv2->daddr && smc_ib_find_route(smcrv2->saddr,
						       smcrv2->daddr,
						       smcrv2->nexthop_mac,
						       &smcrv2->uses_gateway))
			goto out;

		if (gid)
			memcpy(gid, &attr->gid, SMC_GID_SIZE);
		if (sgid_index)
			*sgid_index = attr->index;
		return 0;
	}
out:
	return -ENODEV;
}

/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index,
			 struct smc_init_info_smcrv2 *smcrv2)
{
	const struct ib_gid_attr *attr;
	const struct net_device *ndev;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		ndev = rdma_read_gid_attr_ndev_rcu(attr);
		if (!IS_ERR(ndev) &&
		    ((!vlan_id && !is_vlan_dev(ndev)) ||
		     (vlan_id && is_vlan_dev(ndev) &&
		      vlan_dev_vlan_id(ndev) == vlan_id))) {
			if (!smc_ib_determine_gid_rcu(ndev, attr, gid,
						      sgid_index, smcrv2)) {
				rcu_read_unlock();
				rdma_put_gid_attr(attr);
				return 0;
			}
		}
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

/* check if gid is still defined on smcibdev */
static bool smc_ib_check_link_gid(u8 gid[SMC_GID_SIZE], bool smcrv2,
				  struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	bool rc = false;
	int i;

	for (i = 0; !rc && i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		rcu_read_lock();
		if ((!smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE) ||
		    (smcrv2 && attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
		     !(ipv6_addr_type((const struct in6_addr *)&attr->gid)
				     & IPV6_ADDR_LINKLOCAL)))
			if (!memcmp(gid, &attr->gid, SMC_GID_SIZE))
				rc = true;
		rcu_read_unlock();
		rdma_put_gid_attr(attr);
	}
	return rc;
}

/* check all links if the gid is still defined on smcibdev */
static void smc_ib_gid_check(struct smc_ib_device *smcibdev, u8 ibport)
{
	struct smc_link_group *lgr;
	int i;

	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
		if (strncmp(smcibdev->pnetid[ibport - 1], lgr->pnet_id,
			    SMC_MAX_PNETID_LEN))
			continue; /* lgr is not affected */
		if (list_empty(&lgr->list))
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
			    lgr->lnk[i].smcibdev != smcibdev)
				continue;
			if (!smc_ib_check_link_gid(lgr->lnk[i].gid,
						   lgr->smc_version == SMC_V2,
						   smcibdev, ibport))
				smcr_port_err(smcibdev, ibport);
		}
	}
	spin_unlock_bh(&smc_lgr_list.lock);
}

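/* (re-)read the attributes of an ib port and cache them in the smc_ib_device;
 * also fills the port MAC and, if not yet done, derives the local system id
 */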
static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!smc_ib_is_valid_local_systemid() &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}

/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1)) {
			set_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_err(smcibdev, port_idx + 1);
		} else {
			clear_bit(port_idx, smcibdev->ports_going_away);
			smcr_port_add(smcibdev, port_idx + 1);
			smc_ib_gid_check(smcibdev, port_idx + 1);
		}
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	bool schedule = false;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_DEVICE_FATAL:
		/* terminate all ports on device */
		for (port_idx = 0; port_idx < SMC_MAX_PORTS; port_idx++) {
			set_bit(port_idx, &smcibdev->port_event_mask);
			if (!test_and_set_bit(port_idx,
					      smcibdev->ports_going_away))
				schedule = true;
		}
		if (schedule)
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (test_and_clear_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_PORT_ERR:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	case IB_EVENT_GID_CHANGE:
		port_idx = ibevent->element.port_num - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

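/* allocate a protection domain on the link's ib device for its queue pair
 * and memory regions
 */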
int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

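/* a device is reported as critical if a non-SMC-D link group of type
 * SMC_LGR_SINGLE or SMC_LGR_ASYMMETRIC_LOCAL still has an active link on it
 */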
static bool smcr_diag_is_dev_critical(struct smc_lgr_list *smc_lgr,
				      struct smc_ib_device *smcibdev)
{
	struct smc_link_group *lgr;
	bool rc = false;
	int i;

	spin_lock_bh(&smc_lgr->lock);
	list_for_each_entry(lgr, &smc_lgr->list, list) {
		if (lgr->is_smcd)
			continue;
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
			if (lgr->lnk[i].state == SMC_LNK_UNUSED ||
			    lgr->lnk[i].smcibdev != smcibdev)
				continue;
			if (lgr->type == SMC_LGR_SINGLE ||
			    lgr->type == SMC_LGR_ASYMMETRIC_LOCAL) {
				rc = true;
				goto out;
			}
		}
	}
out:
	spin_unlock_bh(&smc_lgr->lock);
	return rc;
}

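/* fill a netlink nest with the attributes of one ib port: pnetid, netdev
 * ifindex, port state and link count
 */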
static int smc_nl_handle_dev_port(struct sk_buff *skb,
				  struct ib_device *ibdev,
				  struct smc_ib_device *smcibdev,
				  int port)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct nlattr *port_attrs;
	unsigned char port_state;
	int lnk_count = 0;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT + port);
	if (!port_attrs)
		goto errout;

	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR,
		       smcibdev->pnetid_by_user[port]))
		goto errattr;
	memcpy(smc_pnet, &smcibdev->pnetid[port], SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errattr;
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_NETDEV,
			smcibdev->ndev_ifidx[port]))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_VALID, 1))
		goto errattr;
	port_state = smc_ib_port_active(smcibdev, port + 1);
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_STATE, port_state))
		goto errattr;
	lnk_count = atomic_read(&smcibdev->lnk_cnt_by_port[port]);
	if (nla_put_u32(skb, SMC_NLA_DEV_PORT_LNK_CNT, lnk_count))
		goto errattr;
	nla_nest_end(skb, port_attrs);
	return 0;
errattr:
	nla_nest_cancel(skb, port_attrs);
errout:
	return -EMSGSIZE;
}

static bool smc_nl_handle_pci_values(const struct smc_pci_dev *smc_pci_dev,
				     struct sk_buff *skb)
{
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev->pci_fid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev->pci_pchid))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev->pci_vendor))
		return false;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev->pci_device))
		return false;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev->pci_id))
		return false;
	return true;
}

static int smc_nl_handle_smcr_dev(struct smc_ib_device *smcibdev,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_ibname[IB_DEVICE_NAME_MAX];
	struct smc_pci_dev smc_pci_dev;
	struct pci_dev *pci_dev;
	unsigned char is_crit;
	struct nlattr *attrs;
	void *nlh;
	int i;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCR);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCR);
	if (!attrs)
		goto errout;
	is_crit = smcr_diag_is_dev_critical(&smc_lgr_list, smcibdev);
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, is_crit))
		goto errattr;
	if (smcibdev->ibdev->dev.parent) {
		memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
		pci_dev = to_pci_dev(smcibdev->ibdev->dev.parent);
		smc_set_pci_values(pci_dev, &smc_pci_dev);
		if (!smc_nl_handle_pci_values(&smc_pci_dev, skb))
			goto errattr;
	}
	snprintf(smc_ibname, sizeof(smc_ibname), "%s", smcibdev->ibdev->name);
	if (nla_put_string(skb, SMC_NLA_DEV_IB_NAME, smc_ibname))
		goto errattr;
	for (i = 1; i <= SMC_MAX_PORTS; i++) {
		if (!rdma_is_port_valid(smcibdev->ibdev, i))
			continue;
		if (smc_nl_handle_dev_port(skb, smcibdev->ibdev,
					   smcibdev, i - 1))
			goto errattr;
	}

	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errattr:
	nla_nest_cancel(skb, attrs);
errout:
	genlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_prep_smcr_dev(struct smc_ib_devices *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	struct smc_ib_device *smcibdev;
	int snum = cb_ctx->pos[0];
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcibdev, &dev_list->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_smcr_dev(smcibdev, skb, cb))
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}

int smcr_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcr_dev(&smc_ib_devices, skb, cb);
	return skb->len;
}

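/* QP async event handler, called by the IB core; fatal QP errors mark the
 * port as going away and trigger the port event worker
 */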
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		if (port_idx >= SMC_MAX_PORTS)
			break;
		set_bit(port_idx, &smcibdev->port_event_mask);
		if (!test_and_set_bit(port_idx, smcibdev->ports_going_away))
			schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}

/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	int sges_per_buf = (lnk->lgr->smc_version == SMC_V2) ? 2 : 1;
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = sges_per_buf,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot, u8 link_idx)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[link_idx],
			      buf_slot->sgt[link_idx].sgl,
			      buf_slot->sgt[link_idx].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot, u8 link_idx)
{
	if (buf_slot->mr_rx[link_idx])
		return 0; /* already done */

	buf_slot->mr_rx[link_idx] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[link_idx])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[link_idx]);
		buf_slot->mr_rx[link_idx] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot, link_idx) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_link *lnk,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(lnk->smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_link *lnk,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[lnk->link_idx].sgl, sg,
		    buf_slot->sgt[lnk->link_idx].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(lnk->smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_link *lnk,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(lnk->smcibdev->ibdev,
				     buf_slot->sgt[lnk->link_idx].sgl,
				     buf_slot->sgt[lnk->link_idx].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}

void smc_ib_buf_unmap_sg(struct smc_link *lnk,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[lnk->link_idx].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(lnk->smcibdev->ibdev,
			buf_slot->sgt[lnk->link_idx].sgl,
			buf_slot->sgt[lnk->link_idx].orig_nents,
			data_direction);
	buf_slot->sgt[lnk->link_idx].sgl->dma_address = 0;
}

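/* set up the send and receive completion queues of an ib device and register
 * it with the work request layer; done once per device, protected by the
 * device mutex
 */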
long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	mutex_lock(&smcibdev->mutex);
	rc = 0;
	if (smcibdev->initialized)
		goto out;
	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		goto out;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	goto out;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
out:
	mutex_unlock(&smcibdev->mutex);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	mutex_lock(&smcibdev->mutex);
	if (!smcibdev->initialized)
		goto out;
	smcibdev->initialized = 0;
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
	smc_wr_remove_dev(smcibdev);
out:
	mutex_unlock(&smcibdev->mutex);
}

static struct ib_client smc_ib_client;

static void smc_copy_netdev_ifindex(struct smc_ib_device *smcibdev, int port)
{
	struct ib_device *ibdev = smcibdev->ibdev;
	struct net_device *ndev;

	if (!ibdev->ops.get_netdev)
		return;
	ndev = ibdev->ops.get_netdev(ibdev, port + 1);
	if (ndev) {
		smcibdev->ndev_ifidx[port] = ndev->ifindex;
		dev_put(ndev);
	}
}

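/* update the cached netdev ifindex of all smc ib devices when a net device
 * is registered or unregistered
 */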
void smc_ib_ndev_change(struct net_device *ndev, unsigned long event)
{
	struct smc_ib_device *smcibdev;
	struct ib_device *libdev;
	struct net_device *lndev;
	u8 port_cnt;
	int i;

	mutex_lock(&smc_ib_devices.mutex);
	list_for_each_entry(smcibdev, &smc_ib_devices.list, list) {
		port_cnt = smcibdev->ibdev->phys_port_cnt;
		for (i = 0; i < min_t(size_t, port_cnt, SMC_MAX_PORTS); i++) {
			libdev = smcibdev->ibdev;
			if (!libdev->ops.get_netdev)
				continue;
			lndev = libdev->ops.get_netdev(libdev, i + 1);
			dev_put(lndev);
			if (lndev != ndev)
				continue;
			if (event == NETDEV_REGISTER)
				smcibdev->ndev_ifidx[i] = ndev->ifindex;
			if (event == NETDEV_UNREGISTER)
				smcibdev->ndev_ifidx[i] = 0;
		}
	}
	mutex_unlock(&smc_ib_devices.mutex);
}

/* callback function for ib_register_client() */
static int smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return -EOPNOTSUPP;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return -ENOMEM;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);
	atomic_set(&smcibdev->lnk_cnt, 0);
	init_waitqueue_head(&smcibdev->lnks_deleted);
	mutex_init(&smcibdev->mutex);
	mutex_lock(&smc_ib_devices.mutex);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	mutex_unlock(&smc_ib_devices.mutex);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	pr_warn_ratelimited("smc: adding ib device %s with port count %d\n",
			    smcibdev->ibdev->name, port_cnt);
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		if (smc_pnetid_by_dev_port(ibdev->dev.parent, i,
					   smcibdev->pnetid[i]))
			smc_pnetid_by_table_ib(smcibdev, i + 1);
		smc_copy_netdev_ifindex(smcibdev, i);
		pr_warn_ratelimited("smc: ib device %s port %d has pnetid "
				    "%.16s%s\n",
				    smcibdev->ibdev->name, i + 1,
				    smcibdev->pnetid[i],
				    smcibdev->pnetid_by_user[i] ?
					" (user defined)" :
					"");
	}
	schedule_work(&smcibdev->port_event_work);
	return 0;
}

/* callback function for ib_unregister_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev = client_data;

	mutex_lock(&smc_ib_devices.mutex);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	mutex_unlock(&smc_ib_devices.mutex);
	pr_warn_ratelimited("smc: removing ib device %s\n",
			    smcibdev->ibdev->name);
	smc_smcr_terminate_all(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	cancel_work_sync(&smcibdev->port_event_work);
	kfree(smcibdev);
}

static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove = smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	smc_ib_init_local_systemid();
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}