/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include <rdma/uverbs_ioctl.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

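/* Return the device attributes cached at init time. rxe defines no
 * driver-private data for this verb, so any udata payload is rejected.
 */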
static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

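/* Start from the cached port attributes, then refresh link speed and
 * width from the underlying Ethernet device and derive phys_state from
 * the port state and the netdev's IFF_UP flag, all under usdev_lock.
 */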
static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
	else
		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}

static int rxe_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
			struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);

	return rxe_add_to_pool(&rxe->pd_pool, &pd->pelem);
}

static void rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
}

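/* Address handle verbs. An rxe AH holds a driver-format AV built from
 * the rdma_ah_attr and pins its PD with a reference for the lifetime
 * of the handle.
 */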
static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   u32 flags,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

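/* Copy one recv WR into the WQE at the queue's producer slot. Callers
 * (rxe_post_recv and rxe_post_srq_recv) hold the queue's producer_lock
 * around this; the smp_wmb() orders the WQE writes against publishing
 * the new producer index.
 */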
static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}

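/* Shared receive queue verbs. A userspace SRQ receives its queue
 * layout through rxe_create_srq_resp, and modify passes a
 * rxe_modify_srq_cmd, so the udata sizes are validated before use.
 */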
static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ucontext *ucontext =
		rdma_udata_to_drv_context(udata, struct rxe_ucontext, ibuc);
	struct rxe_srq *srq;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, &ucontext->ibuc, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

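/* Queue pair verbs. User QPs supply an output buffer for
 * rxe_create_qp_resp but no input payload; kernel QPs pass no udata
 * at all. qp->is_user steers later protocol processing.
 */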
static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd, udata);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

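/* Check a send WR against the send queue limits: SGE count, inline
 * data length, and for atomics a payload of at least 8 bytes at an
 * 8-byte-aligned remote address.
 */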
static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

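/* Build a send WQE from a validated WR. UD-style QPs capture the AH's
 * AV, inline data is copied into the WQE body, fast-register WRs carry
 * no DMA state, and other opcodes record their SGE list plus the
 * remote iova for RDMA/atomic operations.
 */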
static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));

	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}

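/* Validate one send WR and write it into the next SQ producer slot
 * under sq_lock, with the same write-barrier ordering as
 * post_one_recv().
 */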
static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

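/* Post a chain of send WRs from a kernel ULP, stopping at the first
 * bad WR. The requester task is kicked unconditionally so earlier
 * WQEs still make progress; if the QP is in the error state the
 * completer task runs too, flushing completions back to the caller.
 */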
static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

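/* Completion queue verbs. No CQ creation flags are supported, and a
 * userspace CQ gets its queue layout back through rxe_create_cq_resp
 * (or rxe_resize_cq_resp on resize).
 */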
static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}

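/* Memory region verbs. Each MR variant (DMA, user, fast-register) is
 * an indexed pool object that takes a reference on its PD; dereg marks
 * the MR as a zombie and drops the references.
 */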
static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

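/* ib_sg_to_pages() callback: store one page-sized buffer in the MR's
 * two-level map table, failing once all num_buf slots are filled.
 */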
static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe =
		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

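/* Verb entry points handed to the RDMA core at registration time. */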
static const struct ib_device_ops rxe_dev_ops = {
	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_netdev = rxe_get_netdev,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
	INIT_RDMA_OBJ_SIZE(ib_pd, rxe_pd, ibpd),
};

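/* Fill in the ib_device fields that are constant for rxe (node type,
 * GUID derived from the netdev MAC address, virtual DMA ops, supported
 * uverbs command mask), allocate the crc32 shash used for ICRC
 * computation, and register with the RDMA core under the "rxe%d"
 * naming scheme.
 */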
int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, "rxe%d");
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	return 0;

err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

void rxe_unregister_device(struct rxe_dev *rxe)
{
	struct ib_device *dev = &rxe->ib_dev;

	ib_unregister_device(dev);
}