/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

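/* Return the device attributes cached in the rxe device structure.
 * Vendor-specific input/output via uhw is not supported, so any
 * udata payload is rejected with -EINVAL.
 */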
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

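/* Copy the cached port attributes and fill in link speed, width and
 * physical state from the underlying Ethernet device under usdev_lock.
 */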
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
	else
		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   u32 flags,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

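/* Build one receive WQE from an ib_recv_wr and append it to the receive
 * queue. Fails with -ENOMEM when the queue is full and with -EINVAL when
 * the request carries more SGEs than the queue supports. Callers hold the
 * queue's producer lock.
 */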
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

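/* Allocate and initialize a shared receive queue. For a userspace caller
 * the response buffer described by udata must be large enough for
 * struct rxe_create_srq_resp.
 */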
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
	struct rxe_srq *srq;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

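/* Allocate and initialize a queue pair. Inbound udata is not used, so a
 * user request with a non-zero inlen is rejected; the outbuf returns
 * struct rxe_create_qp_resp to userspace.
 */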
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

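/* Sanity-check a send work request against the send queue limits:
 * SGE count, minimum length and alignment for atomics, and the
 * maximum inline data size.
 */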
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

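/* Translate the opcode-specific fields of an ib_send_wr into the
 * driver's internal rxe_send_wr representation.
 */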
static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

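/* Fill a send WQE from a validated work request: copy the AV for
 * UD/SMI/GSI QPs, copy inline data or the SGE list, and record the
 * remote iova for RDMA and atomic operations.
 */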
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

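/* Validate one send work request and copy it into the next free slot
 * of the send queue under the send queue lock.
 */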
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

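/* Post a chain of send work requests from a kernel ULP and kick the
 * requester task. On failure *bad_wr points at the first request that
 * could not be posted.
 */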
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

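/* Post a chain of receive work requests on a QP's own receive queue.
 * Rejected if the QP is invalid, not yet in INIT, or attached to an SRQ.
 */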
static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

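/* Copy up to num_entries completions from the completion queue into the
 * caller's work completion array and return how many were copied.
 */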
static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

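/* rxe_set_page() is the per-page callback used by rxe_map_mr_sg() below:
 * ib_sg_to_pages() walks the scatterlist and records each page address
 * in the MR's buffer map.
 */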
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}

static const struct ib_device_ops rxe_dev_ops = {
	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_driver = rxe_dealloc,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.enable_driver = rxe_enable_driver,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
};

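/* Fill in the ib_device fields (node GUID derived from the netdev MAC,
 * dma_virt_ops for DMA mapping, the uverbs command mask), allocate the
 * crc32 shash transform and register the device with the RDMA core
 * under the "rxe%d" naming scheme.
 */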
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);
	err = ib_device_set_netdev(&rxe->ib_dev, rxe->ndev, 1);
	if (err)
		return err;

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, "rxe%d");
	if (err)
		pr_warn("%s failed with error %d\n", __func__, err);

	/*
	 * Note that rxe may be invalid at this point if another thread
	 * unregistered it.
	 */
	return err;
}