// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#include <rdma/rdma_netlink.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"

MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
MODULE_DESCRIPTION("Soft RDMA transport");
MODULE_LICENSE("Dual BSD/GPL");

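/* rxe ("soft RoCE") implements the RDMA verbs API in software on top of
 * an ordinary Ethernet netdev, carrying RoCEv2 packets over UDP port
 * 4791.
 */
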
/* free resources for a rxe device; all objects created for this device
 * must have been destroyed
 */
void rxe_dealloc(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_pool_cleanup(&rxe->uc_pool);
	rxe_pool_cleanup(&rxe->pd_pool);
	rxe_pool_cleanup(&rxe->ah_pool);
	rxe_pool_cleanup(&rxe->srq_pool);
	rxe_pool_cleanup(&rxe->qp_pool);
	rxe_pool_cleanup(&rxe->cq_pool);
	rxe_pool_cleanup(&rxe->mr_pool);
	rxe_pool_cleanup(&rxe->mw_pool);
	rxe_pool_cleanup(&rxe->mc_grp_pool);
	rxe_pool_cleanup(&rxe->mc_elem_pool);

	if (rxe->tfm)
		crypto_free_shash(rxe->tfm);
}

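/* there is no HCA to query: every RXE_* limit used below is a
 * compile-time default from rxe_param.h, reported to the verbs core
 * unchanged.
 */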
/* initialize rxe device parameters */
static void rxe_init_device_param(struct rxe_dev *rxe)
{
	rxe->max_inline_data = RXE_MAX_INLINE_DATA;

	rxe->attr.vendor_id = RXE_VENDOR_ID;
	rxe->attr.max_mr_size = RXE_MAX_MR_SIZE;
	rxe->attr.page_size_cap = RXE_PAGE_SIZE_CAP;
	rxe->attr.max_qp = RXE_MAX_QP;
	rxe->attr.max_qp_wr = RXE_MAX_QP_WR;
	rxe->attr.device_cap_flags = RXE_DEVICE_CAP_FLAGS;
	rxe->attr.max_send_sge = RXE_MAX_SGE;
	rxe->attr.max_recv_sge = RXE_MAX_SGE;
	rxe->attr.max_sge_rd = RXE_MAX_SGE_RD;
	rxe->attr.max_cq = RXE_MAX_CQ;
	rxe->attr.max_cqe = (1 << RXE_MAX_LOG_CQE) - 1;
	rxe->attr.max_mr = RXE_MAX_MR;
	rxe->attr.max_mw = RXE_MAX_MW;
	rxe->attr.max_pd = RXE_MAX_PD;
	rxe->attr.max_qp_rd_atom = RXE_MAX_QP_RD_ATOM;
	rxe->attr.max_res_rd_atom = RXE_MAX_RES_RD_ATOM;
	rxe->attr.max_qp_init_rd_atom = RXE_MAX_QP_INIT_RD_ATOM;
	rxe->attr.atomic_cap = IB_ATOMIC_HCA;
	rxe->attr.max_mcast_grp = RXE_MAX_MCAST_GRP;
	rxe->attr.max_mcast_qp_attach = RXE_MAX_MCAST_QP_ATTACH;
	rxe->attr.max_total_mcast_qp_attach = RXE_MAX_TOT_MCAST_QP_ATTACH;
	rxe->attr.max_ah = RXE_MAX_AH;
	rxe->attr.max_srq = RXE_MAX_SRQ;
	rxe->attr.max_srq_wr = RXE_MAX_SRQ_WR;
	rxe->attr.max_srq_sge = RXE_MAX_SRQ_SGE;
	rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
	rxe->attr.max_pkeys = RXE_MAX_PKEYS;
	rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
	addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
			rxe->ndev->dev_addr);

	rxe->max_ucontext = RXE_MAX_UCONTEXT;
}

/* initialize port attributes */
static void rxe_init_port_param(struct rxe_port *port)
{
	port->attr.state = IB_PORT_DOWN;
	port->attr.max_mtu = IB_MTU_4096;
	port->attr.active_mtu = IB_MTU_256;
	port->attr.gid_tbl_len = RXE_PORT_GID_TBL_LEN;
	port->attr.port_cap_flags = RXE_PORT_PORT_CAP_FLAGS;
	port->attr.max_msg_sz = RXE_PORT_MAX_MSG_SZ;
	port->attr.bad_pkey_cntr = RXE_PORT_BAD_PKEY_CNTR;
	port->attr.qkey_viol_cntr = RXE_PORT_QKEY_VIOL_CNTR;
	port->attr.pkey_tbl_len = RXE_PORT_PKEY_TBL_LEN;
	port->attr.lid = RXE_PORT_LID;
	port->attr.sm_lid = RXE_PORT_SM_LID;
	port->attr.lmc = RXE_PORT_LMC;
	port->attr.max_vl_num = RXE_PORT_MAX_VL_NUM;
	port->attr.sm_sl = RXE_PORT_SM_SL;
	port->attr.subnet_timeout = RXE_PORT_SUBNET_TIMEOUT;
	port->attr.init_type_reply = RXE_PORT_INIT_TYPE_REPLY;
	port->attr.active_width = RXE_PORT_ACTIVE_WIDTH;
	port->attr.active_speed = RXE_PORT_ACTIVE_SPEED;
	port->attr.phys_state = RXE_PORT_PHYS_STATE;
	port->mtu_cap = ib_mtu_enum_to_int(IB_MTU_256);
	port->subnet_prefix = cpu_to_be64(RXE_PORT_SUBNET_PREFIX);
}

/* initialize port state; note the IB convention that HCA ports are
 * always numbered from 1
 */
static void rxe_init_ports(struct rxe_dev *rxe)
{
	struct rxe_port *port = &rxe->port;

	rxe_init_port_param(port);
	addrconf_addr_eui48((unsigned char *)&port->port_guid,
			    rxe->ndev->dev_addr);
	spin_lock_init(&port->port_lock);
}

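/* each class of verbs object (ucontext, PD, QP, CQ, ...) lives in its
 * own rxe_pool, which enforces the per-device limits chosen above
 * (see rxe_pool.c); on failure, the pools initialized so far are
 * unwound in reverse order.
 */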
/* init pools of managed objects */
static int rxe_init_pools(struct rxe_dev *rxe)
{
	int err;

	err = rxe_pool_init(rxe, &rxe->uc_pool, RXE_TYPE_UC,
			    rxe->max_ucontext);
	if (err)
		goto err1;

	err = rxe_pool_init(rxe, &rxe->pd_pool, RXE_TYPE_PD,
			    rxe->attr.max_pd);
	if (err)
		goto err2;

	err = rxe_pool_init(rxe, &rxe->ah_pool, RXE_TYPE_AH,
			    rxe->attr.max_ah);
	if (err)
		goto err3;

	err = rxe_pool_init(rxe, &rxe->srq_pool, RXE_TYPE_SRQ,
			    rxe->attr.max_srq);
	if (err)
		goto err4;

	err = rxe_pool_init(rxe, &rxe->qp_pool, RXE_TYPE_QP,
			    rxe->attr.max_qp);
	if (err)
		goto err5;

	err = rxe_pool_init(rxe, &rxe->cq_pool, RXE_TYPE_CQ,
			    rxe->attr.max_cq);
	if (err)
		goto err6;

	err = rxe_pool_init(rxe, &rxe->mr_pool, RXE_TYPE_MR,
			    rxe->attr.max_mr);
	if (err)
		goto err7;

	err = rxe_pool_init(rxe, &rxe->mw_pool, RXE_TYPE_MW,
			    rxe->attr.max_mw);
	if (err)
		goto err8;

	err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
			    rxe->attr.max_mcast_grp);
	if (err)
		goto err9;

	err = rxe_pool_init(rxe, &rxe->mc_elem_pool, RXE_TYPE_MC_ELEM,
			    rxe->attr.max_total_mcast_qp_attach);
	if (err)
		goto err10;

	return 0;

err10:
	rxe_pool_cleanup(&rxe->mc_grp_pool);
err9:
	rxe_pool_cleanup(&rxe->mw_pool);
err8:
	rxe_pool_cleanup(&rxe->mr_pool);
err7:
	rxe_pool_cleanup(&rxe->cq_pool);
err6:
	rxe_pool_cleanup(&rxe->qp_pool);
err5:
	rxe_pool_cleanup(&rxe->srq_pool);
err4:
	rxe_pool_cleanup(&rxe->ah_pool);
err3:
	rxe_pool_cleanup(&rxe->pd_pool);
err2:
	rxe_pool_cleanup(&rxe->uc_pool);
err1:
	return err;
}

/* initialize rxe device state */
static int rxe_init(struct rxe_dev *rxe)
{
	int err;

	/* init default device parameters */
	rxe_init_device_param(rxe);

	rxe_init_ports(rxe);

	err = rxe_init_pools(rxe);
	if (err)
		return err;

	/* init pending mmap list */
	spin_lock_init(&rxe->mmap_offset_lock);
	spin_lock_init(&rxe->pending_lock);
	INIT_LIST_HEAD(&rxe->pending_mmaps);

	mutex_init(&rxe->usdev_lock);

	return 0;
}

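/* set the active port MTU from the netdev MTU, clamped to the IB range
 * [IB_MTU_256, IB_MTU_4096]; called at device creation and again from
 * the netdev event handler when the underlying MTU changes (see
 * rxe_net.c)
 */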
void rxe_set_mtu(struct rxe_dev *rxe, unsigned int ndev_mtu)
{
	struct rxe_port *port = &rxe->port;
	enum ib_mtu mtu;

	mtu = eth_mtu_int_to_enum(ndev_mtu);

	/* Make sure that the new MTU is in range */
	mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256;

	port->attr.active_mtu = mtu;
	port->mtu_cap = ib_mtu_enum_to_int(mtu);
}

/* called by the ifc layer to create a new rxe device.
 * The caller should allocate memory for rxe by calling ib_alloc_device.
 */
int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name)
{
	int err;

	err = rxe_init(rxe);
	if (err)
		return err;

	rxe_set_mtu(rxe, mtu);

	return rxe_register_device(rxe, ibdev_name);
}

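/* "rdma link" netlink handler: creates a rxe device on top of an
 * existing Ethernet netdev, e.g. "rdma link add rxe0 type rxe netdev
 * eth0" (rxe0/eth0 are illustrative names). VLAN netdevs are rejected;
 * rxe must sit on the real device.
 */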
static int rxe_newlink(const char *ibdev_name, struct net_device *ndev)
{
	struct rxe_dev *exists;
	int err = 0;

	if (is_vlan_dev(ndev)) {
		pr_err("rxe creation allowed on top of a real device only\n");
		err = -EPERM;
		goto err;
	}

	exists = rxe_get_dev_from_net(ndev);
	if (exists) {
		ib_device_put(&exists->ib_dev);
		pr_err("already configured on %s\n", ndev->name);
		err = -EEXIST;
		goto err;
	}

	err = rxe_net_add(ibdev_name, ndev);
	if (err) {
		pr_err("failed to add %s\n", ndev->name);
		goto err;
	}
err:
	return err;
}

static struct rdma_link_ops rxe_link_ops = {
	.type = "rxe",
	.newlink = rxe_newlink,
};

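/* module load order: bring up the shared UDP sockets first
 * (rxe_net_init), then register the "rxe" link type so devices can be
 * created from user space; unload tears this down in reverse.
 */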
static int __init rxe_module_init(void)
{
	int err;

	err = rxe_net_init();
	if (err)
		return err;

	rdma_link_register(&rxe_link_ops);
	pr_info("loaded\n");
	return 0;
}

static void __exit rxe_module_exit(void)
{
	rdma_link_unregister(&rxe_link_ops);
	ib_unregister_driver(RDMA_DRIVER_RXE);
	rxe_net_exit();

	pr_info("unloaded\n");
}

late_initcall(rxe_module_init);
module_exit(rxe_module_exit);

MODULE_ALIAS_RDMA_LINK("rxe");