blob: e1a2b02bbd91bea1323f006436639e1ed5cfa2d6 [file] [log] [blame]
Gary Leshner438d7dd2020-05-11 12:05:54 -04001// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
2/*
3 * Copyright(c) 2020 Intel Corporation.
4 *
5 */
6
7/*
8 * This file contains HFI1 support for ipoib functionality
9 */
10
11#include "ipoib.h"
12#include "hfi.h"
13
Jakub Kicinskifd922132021-10-19 11:26:04 -070014static u32 qpn_from_mac(const u8 *mac_arr)
Gary Leshner438d7dd2020-05-11 12:05:54 -040015{
16 return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3];
17}
18
19static int hfi1_ipoib_dev_init(struct net_device *dev)
20{
21 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -040022 int ret;
Gary Leshner438d7dd2020-05-11 12:05:54 -040023
Heiner Kallweitaa0616a2020-11-10 20:47:34 +010024 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
Gary Leshner438d7dd2020-05-11 12:05:54 -040025
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -040026 ret = priv->netdev_ops->ndo_init(dev);
27 if (ret)
28 return ret;
29
30 ret = hfi1_netdev_add_data(priv->dd,
31 qpn_from_mac(priv->netdev->dev_addr),
32 dev);
33 if (ret < 0) {
34 priv->netdev_ops->ndo_uninit(dev);
35 return ret;
36 }
37
38 return 0;
Gary Leshner438d7dd2020-05-11 12:05:54 -040039}
40
41static void hfi1_ipoib_dev_uninit(struct net_device *dev)
42{
43 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
44
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -040045 hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr));
46
Gary Leshner438d7dd2020-05-11 12:05:54 -040047 priv->netdev_ops->ndo_uninit(dev);
48}
49
50static int hfi1_ipoib_dev_open(struct net_device *dev)
51{
52 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
53 int ret;
54
55 ret = priv->netdev_ops->ndo_open(dev);
56 if (!ret) {
57 struct hfi1_ibport *ibp = to_iport(priv->device,
58 priv->port_num);
59 struct rvt_qp *qp;
60 u32 qpn = qpn_from_mac(priv->netdev->dev_addr);
61
62 rcu_read_lock();
63 qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
64 if (!qp) {
65 rcu_read_unlock();
66 priv->netdev_ops->ndo_stop(dev);
67 return -EINVAL;
68 }
69 rvt_get_qp(qp);
70 priv->qp = qp;
71 rcu_read_unlock();
72
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -040073 hfi1_netdev_enable_queues(priv->dd);
Gary Leshner438d7dd2020-05-11 12:05:54 -040074 hfi1_ipoib_napi_tx_enable(dev);
75 }
76
77 return ret;
78}
79
80static int hfi1_ipoib_dev_stop(struct net_device *dev)
81{
82 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
83
84 if (!priv->qp)
85 return 0;
86
87 hfi1_ipoib_napi_tx_disable(dev);
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -040088 hfi1_netdev_disable_queues(priv->dd);
Gary Leshner438d7dd2020-05-11 12:05:54 -040089
90 rvt_put_qp(priv->qp);
91 priv->qp = NULL;
92
93 return priv->netdev_ops->ndo_stop(dev);
94}
95
/*
 * net_device_ops installed on the ipoib netdev. Each lifecycle callback
 * wraps the original ipoib op saved in priv->netdev_ops with
 * hfi1-specific setup/teardown; stats are served from dev->tstats.
 */
static const struct net_device_ops hfi1_ipoib_netdev_ops = {
	.ndo_init = hfi1_ipoib_dev_init,
	.ndo_uninit = hfi1_ipoib_dev_uninit,
	.ndo_open = hfi1_ipoib_dev_open,
	.ndo_stop = hfi1_ipoib_dev_stop,
	.ndo_get_stats64 = dev_get_tstats64,
};
103
Gary Leshner438d7dd2020-05-11 12:05:54 -0400104static int hfi1_ipoib_mcast_attach(struct net_device *dev,
105 struct ib_device *device,
106 union ib_gid *mgid,
107 u16 mlid,
108 int set_qkey,
109 u32 qkey)
110{
111 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
112 u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
113 struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
114 struct rvt_qp *qp;
115 int ret = -EINVAL;
116
117 rcu_read_lock();
118
119 qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
120 if (qp) {
121 rvt_get_qp(qp);
122 rcu_read_unlock();
123 if (set_qkey)
124 priv->qkey = qkey;
125
126 /* attach QP to multicast group */
127 ret = ib_attach_mcast(&qp->ibqp, mgid, mlid);
128 rvt_put_qp(qp);
129 } else {
130 rcu_read_unlock();
131 }
132
133 return ret;
134}
135
136static int hfi1_ipoib_mcast_detach(struct net_device *dev,
137 struct ib_device *device,
138 union ib_gid *mgid,
139 u16 mlid)
140{
141 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
142 u32 qpn = (u32)qpn_from_mac(priv->netdev->dev_addr);
143 struct hfi1_ibport *ibp = to_iport(priv->device, priv->port_num);
144 struct rvt_qp *qp;
145 int ret = -EINVAL;
146
147 rcu_read_lock();
148
149 qp = rvt_lookup_qpn(ib_to_rvt(priv->device), &ibp->rvp, qpn);
150 if (qp) {
151 rvt_get_qp(qp);
152 rcu_read_unlock();
153 ret = ib_detach_mcast(&qp->ibqp, mgid, mlid);
154 rvt_put_qp(qp);
155 } else {
156 rcu_read_unlock();
157 }
158 return ret;
159}
160
/*
 * Release everything hfi1_ipoib_setup_rn() allocated for this netdev:
 * TX request state, RX queue state, and the per-cpu stats. Runs as
 * priv_destructor and from the explicit free path; teardown order
 * (TX, then RX, then stats) mirrors the reverse of setup.
 */
static void hfi1_ipoib_netdev_dtor(struct net_device *dev)
{
	struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);

	hfi1_ipoib_txreq_deinit(priv);
	hfi1_ipoib_rxq_deinit(priv->netdev);

	free_percpu(dev->tstats);
}
170
/*
 * Tear down and free a netdev created by hfi1_ipoib_setup_rn() before
 * priv_destructor/needs_free_netdev were armed (i.e. on a setup error
 * path): run the dtor manually, then free the netdev itself.
 */
static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev)
{
	hfi1_ipoib_netdev_dtor(dev);
	free_netdev(dev);
}
176
177static void hfi1_ipoib_set_id(struct net_device *dev, int id)
178{
179 struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev);
180
181 priv->pkey_index = (u16)id;
182 ib_query_pkey(priv->device,
183 priv->port_num,
184 priv->pkey_index,
185 &priv->pkey);
186}
187
/*
 * initialize_rdma_netdev callback handed to the rdma core via
 * hfi1_ipoib_rn_get_params(): wire up the rdma_netdev send/mcast/id
 * callbacks, initialize the hfi1 private state, interpose our
 * net_device_ops around the ones ipoib installed, and allocate the
 * TX and RX queue state.
 *
 * Returns 0 on success or a negative errno.
 *
 * NOTE(review): on txreq/rxq init failure this calls
 * hfi1_ipoib_free_rdma_netdev(), which free_netdev()s a netdev that was
 * allocated by the rdma core caller — verify the caller does not also
 * free it on error, otherwise this is a double free. Cannot be confirmed
 * from this file alone.
 */
static int hfi1_ipoib_setup_rn(struct ib_device *device,
			       u32 port_num,
			       struct net_device *netdev,
			       void *param)
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	struct rdma_netdev *rn = netdev_priv(netdev);
	struct hfi1_ipoib_dev_priv *priv;
	int rc;

	/* rdma_netdev callbacks used by the ipoib layer */
	rn->send = hfi1_ipoib_send;
	rn->tx_timeout = hfi1_ipoib_tx_timeout;
	rn->attach_mcast = hfi1_ipoib_mcast_attach;
	rn->detach_mcast = hfi1_ipoib_mcast_detach;
	rn->set_id = hfi1_ipoib_set_id;
	rn->hca = device;
	rn->port_num = port_num;
	rn->mtu = netdev->mtu;

	priv = hfi1_ipoib_priv(netdev);
	priv->dd = dd;
	priv->netdev = netdev;
	priv->device = device;
	priv->port_num = port_num;
	/* save ipoib's ops so our overrides can chain to them */
	priv->netdev_ops = netdev->netdev_ops;

	netdev->netdev_ops = &hfi1_ipoib_netdev_ops;

	ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey);

	rc = hfi1_ipoib_txreq_init(priv);
	if (rc) {
		dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc);
		hfi1_ipoib_free_rdma_netdev(netdev);
		return rc;
	}

	rc = hfi1_ipoib_rxq_init(netdev);
	if (rc) {
		dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc);
		hfi1_ipoib_free_rdma_netdev(netdev);
		return rc;
	}

	/* from here on, the core tears us down via the destructor */
	netdev->priv_destructor = hfi1_ipoib_netdev_dtor;
	netdev->needs_free_netdev = true;

	return 0;
}
237
238int hfi1_ipoib_rn_get_params(struct ib_device *device,
Mark Bloch1fb7f892021-03-01 09:04:20 +0200239 u32 port_num,
Gary Leshner438d7dd2020-05-11 12:05:54 -0400240 enum rdma_netdev_t type,
241 struct rdma_netdev_alloc_params *params)
242{
243 struct hfi1_devdata *dd = dd_from_ibdev(device);
244
245 if (type != RDMA_NETDEV_IPOIB)
246 return -EOPNOTSUPP;
247
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -0400248 if (!HFI1_CAP_IS_KSET(AIP) || !dd->num_netdev_contexts)
Gary Leshner438d7dd2020-05-11 12:05:54 -0400249 return -EOPNOTSUPP;
250
251 if (!port_num || port_num > dd->num_pports)
252 return -EINVAL;
253
254 params->sizeof_priv = sizeof(struct hfi1_ipoib_rdma_netdev);
255 params->txqs = dd->num_sdma;
Grzegorz Andrejczuk370caa52020-05-11 12:06:43 -0400256 params->rxqs = dd->num_netdev_contexts;
Gary Leshner438d7dd2020-05-11 12:05:54 -0400257 params->param = NULL;
258 params->initialize_rdma_netdev = hfi1_ipoib_setup_rn;
259
260 return 0;
261}