/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/dma-mapping.h>
#include <net/addrconf.h>
#include "rxe.h"
#include "rxe_loc.h"
#include "rxe_queue.h"
#include "rxe_hw_counters.h"

static int rxe_query_device(struct ib_device *dev,
			    struct ib_device_attr *attr,
			    struct ib_udata *uhw)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	*attr = rxe->attr;
	return 0;
}

static int rxe_query_port(struct ib_device *dev,
			  u8 port_num, struct ib_port_attr *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;
	int rc;

	port = &rxe->port;

	/* *attr being zeroed by the caller, avoid zeroing it here */
	*attr = port->attr;

	mutex_lock(&rxe->usdev_lock);
	rc = ib_get_eth_speed(dev, port_num, &attr->active_speed,
			      &attr->active_width);

	if (attr->state == IB_PORT_ACTIVE)
		attr->phys_state = RDMA_LINK_PHYS_STATE_LINK_UP;
	else if (dev_get_flags(rxe->ndev) & IFF_UP)
		attr->phys_state = RDMA_LINK_PHYS_STATE_POLLING;
	else
		attr->phys_state = RDMA_LINK_PHYS_STATE_DISABLED;

	mutex_unlock(&rxe->usdev_lock);

	return rc;
}

static struct net_device *rxe_get_netdev(struct ib_device *device,
					 u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(device);

	if (rxe->ndev) {
		dev_hold(rxe->ndev);
		return rxe->ndev;
	}

	return NULL;
}

static int rxe_query_pkey(struct ib_device *device,
			  u8 port_num, u16 index, u16 *pkey)
{
	struct rxe_dev *rxe = to_rdev(device);
	struct rxe_port *port;

	port = &rxe->port;

	if (unlikely(index >= port->attr.pkey_tbl_len)) {
		dev_warn(device->dev.parent, "invalid index = %d\n",
			 index);
		goto err1;
	}

	*pkey = port->pkey_tbl[index];
	return 0;

err1:
	return -EINVAL;
}

static int rxe_modify_device(struct ib_device *dev,
			     int mask, struct ib_device_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);

	if (mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID)
		rxe->attr.sys_image_guid = cpu_to_be64(attr->sys_image_guid);

	if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(rxe->ib_dev.node_desc,
		       attr->node_desc, sizeof(rxe->ib_dev.node_desc));
	}

	return 0;
}

static int rxe_modify_port(struct ib_device *dev,
			   u8 port_num, int mask, struct ib_port_modify *attr)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_port *port;

	port = &rxe->port;

	port->attr.port_cap_flags |= attr->set_port_cap_mask;
	port->attr.port_cap_flags &= ~attr->clr_port_cap_mask;

	if (mask & IB_PORT_RESET_QKEY_CNTR)
		port->attr.qkey_viol_cntr = 0;

	return 0;
}

static enum rdma_link_layer rxe_get_link_layer(struct ib_device *dev,
					       u8 port_num)
{
	struct rxe_dev *rxe = to_rdev(dev);

	return rxe_link_layer(rxe, port_num);
}

static struct ib_ucontext *rxe_alloc_ucontext(struct ib_device *dev,
					      struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_ucontext *uc;

	uc = rxe_alloc(&rxe->uc_pool);
	return uc ? &uc->ibuc : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_ucontext(struct ib_ucontext *ibuc)
{
	struct rxe_ucontext *uc = to_ruc(ibuc);

	rxe_drop_ref(uc);
	return 0;
}

static int rxe_port_immutable(struct ib_device *dev, u8 port_num,
			      struct ib_port_immutable *immutable)
{
	int err;
	struct ib_port_attr attr;

	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP;

	err = ib_query_port(dev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
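
/*
 * RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP above marks the port as RoCEv2:
 * RoCE packets are encapsulated in UDP/IP, which is what lets rxe run
 * over any ordinary Ethernet netdev instead of RoCE-capable hardware.
 */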

static struct ib_pd *rxe_alloc_pd(struct ib_device *dev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_pd *pd;

	pd = rxe_alloc(&rxe->pd_pool);
	return pd ? &pd->ibpd : ERR_PTR(-ENOMEM);
}

static int rxe_dealloc_pd(struct ib_pd *ibpd)
{
	struct rxe_pd *pd = to_rpd(ibpd);

	rxe_drop_ref(pd);
	return 0;
}

static void rxe_init_av(struct rxe_dev *rxe, struct rdma_ah_attr *attr,
			struct rxe_av *av)
{
	rxe_av_from_attr(rdma_ah_get_port_num(attr), av, attr);
	rxe_av_fill_ip_info(av, attr);
}

static struct ib_ah *rxe_create_ah(struct ib_pd *ibpd,
				   struct rdma_ah_attr *attr,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_ah *ah;

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return ERR_PTR(err);

	ah = rxe_alloc(&rxe->ah_pool);
	if (!ah)
		return ERR_PTR(-ENOMEM);

	rxe_add_ref(pd);
	ah->pd = pd;

	rxe_init_av(rxe, attr, &ah->av);
	return &ah->ibah;
}

static int rxe_modify_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibah->device);
	struct rxe_ah *ah = to_rah(ibah);

	err = rxe_av_chk_attr(rxe, attr);
	if (err)
		return err;

	rxe_init_av(rxe, attr, &ah->av);
	return 0;
}

static int rxe_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *attr)
{
	struct rxe_ah *ah = to_rah(ibah);

	memset(attr, 0, sizeof(*attr));
	attr->type = ibah->type;
	rxe_av_to_attr(&ah->av, attr);
	return 0;
}

static int rxe_destroy_ah(struct ib_ah *ibah)
{
	struct rxe_ah *ah = to_rah(ibah);

	rxe_drop_ref(ah->pd);
	rxe_drop_ref(ah);
	return 0;
}

static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr)
{
	int err;
	int i;
	u32 length;
	struct rxe_recv_wqe *recv_wqe;
	int num_sge = ibwr->num_sge;

	if (unlikely(queue_full(rq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	if (unlikely(num_sge > rq->max_sge)) {
		err = -EINVAL;
		goto err1;
	}

	length = 0;
	for (i = 0; i < num_sge; i++)
		length += ibwr->sg_list[i].length;

	recv_wqe = producer_addr(rq->queue);
	recv_wqe->wr_id = ibwr->wr_id;
	recv_wqe->num_sge = num_sge;

	memcpy(recv_wqe->dma.sge, ibwr->sg_list,
	       num_sge * sizeof(struct ib_sge));

	recv_wqe->dma.length = length;
	recv_wqe->dma.resid = length;
	recv_wqe->dma.num_sge = num_sge;
	recv_wqe->dma.cur_sge = 0;
	recv_wqe->dma.sge_offset = 0;

	/* make sure all changes to the work queue are written before we
	 * update the producer pointer
	 */
	smp_wmb();

	advance_producer(rq->queue);
	return 0;

err1:
	return err;
}
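
/*
 * The smp_wmb() in post_one_recv() is the producer half of a classic
 * producer/consumer ring protocol: all stores to the WQE must be
 * visible before the producer index moves.  A sketch of the pairing
 * (the consumer half lives in the queue code; names are illustrative):
 *
 *	producer			consumer
 *	--------			--------
 *	fill wqe fields			read producer index
 *	smp_wmb()			smp_rmb()
 *	advance producer index		read wqe fields
 */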

static struct ib_srq *rxe_create_srq(struct ib_pd *ibpd,
				     struct ib_srq_init_attr *init,
				     struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_srq *srq;
	struct ib_ucontext *context = udata ? ibpd->uobject->context : NULL;
	struct rxe_create_srq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK);
	if (err)
		goto err1;

	srq = rxe_alloc(&rxe->srq_pool);
	if (!srq) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(srq);
	rxe_add_ref(pd);
	srq->pd = pd;

	err = rxe_srq_from_init(rxe, srq, init, context, uresp);
	if (err)
		goto err2;

	return &srq->ibsrq;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
			  enum ib_srq_attr_mask mask,
			  struct ib_udata *udata)
{
	int err;
	struct rxe_srq *srq = to_rsrq(ibsrq);
	struct rxe_dev *rxe = to_rdev(ibsrq->device);
	struct rxe_modify_srq_cmd ucmd = {};

	if (udata) {
		if (udata->inlen < sizeof(ucmd))
			return -EINVAL;

		err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
		if (err)
			return err;
	}

	err = rxe_srq_chk_attr(rxe, srq, attr, mask);
	if (err)
		goto err1;

	err = rxe_srq_from_attr(rxe, srq, attr, mask, &ucmd);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->error)
		return -EINVAL;

	attr->max_wr = srq->rq.queue->buf->index_mask;
	attr->max_sge = srq->rq.max_sge;
	attr->srq_limit = srq->limit;
	return 0;
}

static int rxe_destroy_srq(struct ib_srq *ibsrq)
{
	struct rxe_srq *srq = to_rsrq(ibsrq);

	if (srq->rq.queue)
		rxe_queue_cleanup(srq->rq.queue);

	rxe_drop_ref(srq->pd);
	rxe_drop_index(srq);
	rxe_drop_ref(srq);

	return 0;
}

static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			     const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	unsigned long flags;
	struct rxe_srq *srq = to_rsrq(ibsrq);

	spin_lock_irqsave(&srq->rq.producer_lock, flags);

	while (wr) {
		err = post_one_recv(&srq->rq, wr);
		if (unlikely(err))
			break;
		wr = wr->next;
	}

	spin_unlock_irqrestore(&srq->rq.producer_lock, flags);

	if (err)
		*bad_wr = wr;

	return err;
}

static struct ib_qp *rxe_create_qp(struct ib_pd *ibpd,
				   struct ib_qp_init_attr *init,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_qp *qp;
	struct rxe_create_qp_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	err = rxe_qp_chk_init(rxe, init);
	if (err)
		goto err1;

	qp = rxe_alloc(&rxe->qp_pool);
	if (!qp) {
		err = -ENOMEM;
		goto err1;
	}

	if (udata) {
		if (udata->inlen) {
			err = -EINVAL;
			goto err2;
		}
		qp->is_user = 1;
	}

	rxe_add_index(qp);

	err = rxe_qp_from_init(rxe, qp, pd, init, uresp, ibpd);
	if (err)
		goto err3;

	return &qp->ibqp;

err3:
	rxe_drop_index(qp);
err2:
	rxe_drop_ref(qp);
err1:
	return ERR_PTR(err);
}

static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			 int mask, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	err = rxe_qp_chk_attr(rxe, qp, attr, mask);
	if (err)
		goto err1;

	err = rxe_qp_from_attr(qp, attr, mask, udata);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
			int mask, struct ib_qp_init_attr *init)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_to_init(qp, init);
	rxe_qp_to_attr(qp, attr, mask);

	return 0;
}

static int rxe_destroy_qp(struct ib_qp *ibqp)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	rxe_qp_destroy(qp);
	rxe_drop_index(qp);
	rxe_drop_ref(qp);
	return 0;
}

static int validate_send_wr(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			    unsigned int mask, unsigned int length)
{
	int num_sge = ibwr->num_sge;
	struct rxe_sq *sq = &qp->sq;

	if (unlikely(num_sge > sq->max_sge))
		goto err1;

	if (unlikely(mask & WR_ATOMIC_MASK)) {
		if (length < 8)
			goto err1;

		if (atomic_wr(ibwr)->remote_addr & 0x7)
			goto err1;
	}

	if (unlikely((ibwr->send_flags & IB_SEND_INLINE) &&
		     (length > sq->max_inline)))
		goto err1;

	return 0;

err1:
	return -EINVAL;
}
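
/*
 * The atomic checks in validate_send_wr() reflect the IBA rules for
 * ATOMIC_CMP_AND_SWP and ATOMIC_FETCH_AND_ADD: the operation targets a
 * single 64-bit value, so the payload must cover at least 8 bytes and
 * the remote address must be 8-byte aligned.
 */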

static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr,
			 const struct ib_send_wr *ibwr)
{
	wr->wr_id = ibwr->wr_id;
	wr->num_sge = ibwr->num_sge;
	wr->opcode = ibwr->opcode;
	wr->send_flags = ibwr->send_flags;

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI) {
		wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn;
		wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey;
		if (qp_type(qp) == IB_QPT_GSI)
			wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index;
		if (wr->opcode == IB_WR_SEND_WITH_IMM)
			wr->ex.imm_data = ibwr->ex.imm_data;
	} else {
		switch (wr->opcode) {
		case IB_WR_RDMA_WRITE_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			/* fall through */
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_WRITE:
			wr->wr.rdma.remote_addr = rdma_wr(ibwr)->remote_addr;
			wr->wr.rdma.rkey = rdma_wr(ibwr)->rkey;
			break;
		case IB_WR_SEND_WITH_IMM:
			wr->ex.imm_data = ibwr->ex.imm_data;
			break;
		case IB_WR_SEND_WITH_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			wr->wr.atomic.remote_addr =
				atomic_wr(ibwr)->remote_addr;
			wr->wr.atomic.compare_add =
				atomic_wr(ibwr)->compare_add;
			wr->wr.atomic.swap = atomic_wr(ibwr)->swap;
			wr->wr.atomic.rkey = atomic_wr(ibwr)->rkey;
			break;
		case IB_WR_LOCAL_INV:
			wr->ex.invalidate_rkey = ibwr->ex.invalidate_rkey;
			break;
		case IB_WR_REG_MR:
			wr->wr.reg.mr = reg_wr(ibwr)->mr;
			wr->wr.reg.key = reg_wr(ibwr)->key;
			wr->wr.reg.access = reg_wr(ibwr)->access;
			break;
		default:
			break;
		}
	}
}

static int init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, unsigned int length,
			 struct rxe_send_wqe *wqe)
{
	int num_sge = ibwr->num_sge;
	struct ib_sge *sge;
	int i;
	u8 *p;

	init_send_wr(qp, &wqe->wr, ibwr);

	if (qp_type(qp) == IB_QPT_UD ||
	    qp_type(qp) == IB_QPT_SMI ||
	    qp_type(qp) == IB_QPT_GSI)
		memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av));

	if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) {
		p = wqe->dma.inline_data;

		sge = ibwr->sg_list;
		for (i = 0; i < num_sge; i++, sge++) {
			memcpy(p, (void *)(uintptr_t)sge->addr,
			       sge->length);

			p += sge->length;
		}
	} else if (mask & WR_REG_MASK) {
		wqe->mask = mask;
		wqe->state = wqe_state_posted;
		return 0;
	} else
		memcpy(wqe->dma.sge, ibwr->sg_list,
		       num_sge * sizeof(struct ib_sge));
	wqe->iova = mask & WR_ATOMIC_MASK ? atomic_wr(ibwr)->remote_addr :
		mask & WR_READ_OR_WRITE_MASK ? rdma_wr(ibwr)->remote_addr : 0;
	wqe->mask = mask;
	wqe->dma.length = length;
	wqe->dma.resid = length;
	wqe->dma.num_sge = num_sge;
	wqe->dma.cur_sge = 0;
	wqe->dma.sge_offset = 0;
	wqe->state = wqe_state_posted;
	wqe->ssn = atomic_add_return(1, &qp->ssn);

	return 0;
}
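
/*
 * For IB_SEND_INLINE the payload is copied into the WQE itself
 * (wqe->dma.inline_data above) at post time, so the source buffers need
 * no lkey and may be reused as soon as the post returns.  For all other
 * sends only the SGE descriptors are copied, and the memory they point
 * at must stay registered and untouched until the WR completes.
 */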

static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr,
			 unsigned int mask, u32 length)
{
	int err;
	struct rxe_sq *sq = &qp->sq;
	struct rxe_send_wqe *send_wqe;
	unsigned long flags;

	err = validate_send_wr(qp, ibwr, mask, length);
	if (err)
		return err;

	spin_lock_irqsave(&qp->sq.sq_lock, flags);

	if (unlikely(queue_full(sq->queue))) {
		err = -ENOMEM;
		goto err1;
	}

	send_wqe = producer_addr(sq->queue);

	err = init_send_wqe(qp, ibwr, mask, length, send_wqe);
	if (unlikely(err))
		goto err1;

	/*
	 * make sure all changes to the work queue are
	 * written before we update the producer pointer
	 */
	smp_wmb();

	advance_producer(sq->queue);
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);

	return 0;

err1:
	spin_unlock_irqrestore(&qp->sq.sq_lock, flags);
	return err;
}

static int rxe_post_send_kernel(struct rxe_qp *qp, const struct ib_send_wr *wr,
				const struct ib_send_wr **bad_wr)
{
	int err = 0;
	unsigned int mask;
	unsigned int length = 0;
	int i;

	while (wr) {
		mask = wr_opcode_mask(wr->opcode, qp);
		if (unlikely(!mask)) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		if (unlikely((wr->send_flags & IB_SEND_INLINE) &&
			     !(mask & WR_INLINE_MASK))) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}

		length = 0;
		for (i = 0; i < wr->num_sge; i++)
			length += wr->sg_list[i].length;

		err = post_one_send(qp, wr, mask, length);

		if (err) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	rxe_run_task(&qp->req.task, 1);
	if (unlikely(qp->req.state == QP_STATE_ERROR))
		rxe_run_task(&qp->comp.task, 1);

	return err;
}

static int rxe_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
			 const struct ib_send_wr **bad_wr)
{
	struct rxe_qp *qp = to_rqp(ibqp);

	if (unlikely(!qp->valid)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (unlikely(qp->req.state < QP_STATE_READY)) {
		*bad_wr = wr;
		return -EINVAL;
	}

	if (qp->is_user) {
		/* Utilize process context to do protocol processing */
		rxe_run_task(&qp->req.task, 0);
		return 0;
	} else
		return rxe_post_send_kernel(qp, wr, bad_wr);
}
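
/*
 * What a kernel ULP typically hands to rxe_post_send() through
 * ib_post_send() (illustrative sketch; "qp", "ctx", "dma_addr", "len"
 * and "mr" are hypothetical objects owned by the caller):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = (uintptr_t)ctx,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *	int ret = ib_post_send(qp, &wr, &bad_wr);
 */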

static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
			 const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_rq *rq = &qp->rq;
	unsigned long flags;

	if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	if (unlikely(qp->srq)) {
		*bad_wr = wr;
		err = -EINVAL;
		goto err1;
	}

	spin_lock_irqsave(&rq->producer_lock, flags);

	while (wr) {
		err = post_one_recv(rq, wr);
		if (unlikely(err)) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}

	spin_unlock_irqrestore(&rq->producer_lock, flags);

	if (qp->resp.state == QP_STATE_ERROR)
		rxe_run_task(&qp->resp.task, 1);

err1:
	return err;
}

static struct ib_cq *rxe_create_cq(struct ib_device *dev,
				   const struct ib_cq_init_attr *attr,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(dev);
	struct rxe_cq *cq;
	struct rxe_create_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return ERR_PTR(-EINVAL);
		uresp = udata->outbuf;
	}

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	err = rxe_cq_chk_attr(rxe, NULL, attr->cqe, attr->comp_vector);
	if (err)
		goto err1;

	cq = rxe_alloc(&rxe->cq_pool);
	if (!cq) {
		err = -ENOMEM;
		goto err1;
	}

	err = rxe_cq_from_init(rxe, cq, attr->cqe, attr->comp_vector,
			       context, uresp);
	if (err)
		goto err2;

	return &cq->ibcq;

err2:
	rxe_drop_ref(cq);
err1:
	return ERR_PTR(err);
}

static int rxe_destroy_cq(struct ib_cq *ibcq)
{
	struct rxe_cq *cq = to_rcq(ibcq);

	rxe_cq_disable(cq);

	rxe_drop_ref(cq);
	return 0;
}

static int rxe_resize_cq(struct ib_cq *ibcq, int cqe, struct ib_udata *udata)
{
	int err;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_dev *rxe = to_rdev(ibcq->device);
	struct rxe_resize_cq_resp __user *uresp = NULL;

	if (udata) {
		if (udata->outlen < sizeof(*uresp))
			return -EINVAL;
		uresp = udata->outbuf;
	}

	err = rxe_cq_chk_attr(rxe, cq, cqe, 0);
	if (err)
		goto err1;

	err = rxe_cq_resize_queue(cq, cqe, uresp);
	if (err)
		goto err1;

	return 0;

err1:
	return err;
}

static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int i;
	struct rxe_cq *cq = to_rcq(ibcq);
	struct rxe_cqe *cqe;
	unsigned long flags;

	spin_lock_irqsave(&cq->cq_lock, flags);
	for (i = 0; i < num_entries; i++) {
		cqe = queue_head(cq->queue);
		if (!cqe)
			break;

		memcpy(wc++, &cqe->ibwc, sizeof(*wc));
		advance_consumer(cq->queue);
	}
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return i;
}
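
/*
 * Typical caller-side polling loop for rxe_poll_cq(), reached through
 * ib_poll_cq() (sketch; "cq", "handle_error" and "complete_io" are
 * hypothetical):
 *
 *	struct ib_wc wc[16];
 *	int i, n;
 *
 *	while ((n = ib_poll_cq(cq, ARRAY_SIZE(wc), wc)) > 0) {
 *		for (i = 0; i < n; i++) {
 *			if (wc[i].status != IB_WC_SUCCESS)
 *				handle_error(&wc[i]);
 *			else
 *				complete_io((void *)(uintptr_t)wc[i].wr_id);
 *		}
 *	}
 */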

static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	int count = queue_count(cq->queue);

	return (count > wc_cnt) ? wc_cnt : count;
}

static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
{
	struct rxe_cq *cq = to_rcq(ibcq);
	unsigned long irq_flags;
	int ret = 0;

	spin_lock_irqsave(&cq->cq_lock, irq_flags);
	if (cq->notify != IB_CQ_NEXT_COMP)
		cq->notify = flags & IB_CQ_SOLICITED_MASK;

	if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !queue_empty(cq->queue))
		ret = 1;

	spin_unlock_irqrestore(&cq->cq_lock, irq_flags);

	return ret;
}
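
/*
 * Returning 1 for IB_CQ_REPORT_MISSED_EVENTS tells the caller that
 * completions were already queued when it re-armed the CQ, so it must
 * poll again rather than wait for the next event.  The standard
 * event-driven loop is therefore:
 *
 *	do {
 *		while (ib_poll_cq(cq, 1, &wc) > 0)
 *			process(&wc);
 *	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
 *				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 */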

static struct ib_mr *rxe_get_dma_mr(struct ib_pd *ibpd, int access)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_dma(pd, access, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static struct ib_mr *rxe_reg_user_mr(struct ib_pd *ibpd,
				     u64 start,
				     u64 length,
				     u64 iova,
				     int access, struct ib_udata *udata)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err2;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_user(pd, start, length, iova,
				access, udata, mr);
	if (err)
		goto err3;

	return &mr->ibmr;

err3:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err2:
	return ERR_PTR(err);
}

static int rxe_dereg_mr(struct ib_mr *ibmr)
{
	struct rxe_mem *mr = to_rmr(ibmr);

	mr->state = RXE_MEM_STATE_ZOMBIE;
	rxe_drop_ref(mr->pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
	return 0;
}

static struct ib_mr *rxe_alloc_mr(struct ib_pd *ibpd,
				  enum ib_mr_type mr_type,
				  u32 max_num_sg)
{
	struct rxe_dev *rxe = to_rdev(ibpd->device);
	struct rxe_pd *pd = to_rpd(ibpd);
	struct rxe_mem *mr;
	int err;

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	mr = rxe_alloc(&rxe->mr_pool);
	if (!mr) {
		err = -ENOMEM;
		goto err1;
	}

	rxe_add_index(mr);

	rxe_add_ref(pd);

	err = rxe_mem_init_fast(pd, max_num_sg, mr);
	if (err)
		goto err2;

	return &mr->ibmr;

err2:
	rxe_drop_ref(pd);
	rxe_drop_index(mr);
	rxe_drop_ref(mr);
err1:
	return ERR_PTR(err);
}

static int rxe_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	struct rxe_map *map;
	struct rxe_phys_buf *buf;

	if (unlikely(mr->nbuf == mr->num_buf))
		return -ENOMEM;

	map = mr->map[mr->nbuf / RXE_BUF_PER_MAP];
	buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP];

	buf->addr = addr;
	buf->size = ibmr->page_size;
	mr->nbuf++;

	return 0;
}

static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			 int sg_nents, unsigned int *sg_offset)
{
	struct rxe_mem *mr = to_rmr(ibmr);
	int n;

	mr->nbuf = 0;

	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page);

	mr->va = ibmr->iova;
	mr->iova = ibmr->iova;
	mr->length = ibmr->length;
	mr->page_shift = ilog2(ibmr->page_size);
	mr->page_mask = ibmr->page_size - 1;
	mr->offset = mr->iova & mr->page_mask;

	return n;
}
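
/*
 * How a kernel ULP drives the fast-registration path above (sketch;
 * "pd", "qp" and "sgt" are hypothetical objects the caller already
 * owns, and error handling is omitted):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	int n = ib_map_mr_sg(mr, sgt->sgl, sgt->nents, NULL, PAGE_SIZE);
 *	struct ib_reg_wr reg = {
 *		.wr     = { .opcode = IB_WR_REG_MR },
 *		.mr     = mr,
 *		.key    = mr->rkey,
 *		.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ,
 *	};
 *	const struct ib_send_wr *bad_wr;
 *
 *	if (n == sgt->nents)
 *		ib_post_send(qp, &reg.wr, &bad_wr);
 */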

static int rxe_attach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	int err;
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);
	struct rxe_mc_grp *grp;

	/* takes a ref on grp if successful */
	err = rxe_mcast_get_grp(rxe, mgid, &grp);
	if (err)
		return err;

	err = rxe_mcast_add_grp_elem(rxe, qp, grp);

	rxe_drop_ref(grp);
	return err;
}

static int rxe_detach_mcast(struct ib_qp *ibqp, union ib_gid *mgid, u16 mlid)
{
	struct rxe_dev *rxe = to_rdev(ibqp->device);
	struct rxe_qp *qp = to_rqp(ibqp);

	return rxe_mcast_drop_grp_elem(rxe, qp, mgid);
}

static ssize_t parent_show(struct device *device,
			   struct device_attribute *attr, char *buf)
{
	struct rxe_dev *rxe = container_of(device, struct rxe_dev,
					   ib_dev.dev);

	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
}

static DEVICE_ATTR_RO(parent);

static struct attribute *rxe_dev_attributes[] = {
	&dev_attr_parent.attr,
	NULL
};

static const struct attribute_group rxe_attr_group = {
	.attrs = rxe_dev_attributes,
};

static const struct ib_device_ops rxe_dev_ops = {
	.alloc_hw_stats = rxe_ib_alloc_hw_stats,
	.alloc_mr = rxe_alloc_mr,
	.alloc_pd = rxe_alloc_pd,
	.alloc_ucontext = rxe_alloc_ucontext,
	.attach_mcast = rxe_attach_mcast,
	.create_ah = rxe_create_ah,
	.create_cq = rxe_create_cq,
	.create_qp = rxe_create_qp,
	.create_srq = rxe_create_srq,
	.dealloc_pd = rxe_dealloc_pd,
	.dealloc_ucontext = rxe_dealloc_ucontext,
	.dereg_mr = rxe_dereg_mr,
	.destroy_ah = rxe_destroy_ah,
	.destroy_cq = rxe_destroy_cq,
	.destroy_qp = rxe_destroy_qp,
	.destroy_srq = rxe_destroy_srq,
	.detach_mcast = rxe_detach_mcast,
	.get_dma_mr = rxe_get_dma_mr,
	.get_hw_stats = rxe_ib_get_hw_stats,
	.get_link_layer = rxe_get_link_layer,
	.get_netdev = rxe_get_netdev,
	.get_port_immutable = rxe_port_immutable,
	.map_mr_sg = rxe_map_mr_sg,
	.mmap = rxe_mmap,
	.modify_ah = rxe_modify_ah,
	.modify_device = rxe_modify_device,
	.modify_port = rxe_modify_port,
	.modify_qp = rxe_modify_qp,
	.modify_srq = rxe_modify_srq,
	.peek_cq = rxe_peek_cq,
	.poll_cq = rxe_poll_cq,
	.post_recv = rxe_post_recv,
	.post_send = rxe_post_send,
	.post_srq_recv = rxe_post_srq_recv,
	.query_ah = rxe_query_ah,
	.query_device = rxe_query_device,
	.query_pkey = rxe_query_pkey,
	.query_port = rxe_query_port,
	.query_qp = rxe_query_qp,
	.query_srq = rxe_query_srq,
	.reg_user_mr = rxe_reg_user_mr,
	.req_notify_cq = rxe_req_notify_cq,
	.resize_cq = rxe_resize_cq,
};

int rxe_register_device(struct rxe_dev *rxe)
{
	int err;
	struct ib_device *dev = &rxe->ib_dev;
	struct crypto_shash *tfm;

	strlcpy(dev->node_desc, "rxe", sizeof(dev->node_desc));

	dev->owner = THIS_MODULE;
	dev->node_type = RDMA_NODE_IB_CA;
	dev->phys_port_cnt = 1;
	dev->num_comp_vectors = num_possible_cpus();
	dev->dev.parent = rxe_dma_device(rxe);
	dev->local_dma_lkey = 0;
	addrconf_addr_eui48((unsigned char *)&dev->node_guid,
			    rxe->ndev->dev_addr);
	dev->dev.dma_ops = &dma_virt_ops;
	dma_coerce_mask_and_coherent(&dev->dev,
				     dma_get_required_mask(&dev->dev));

	dev->uverbs_abi_ver = RXE_UVERBS_ABI_VERSION;
	dev->uverbs_cmd_mask = BIT_ULL(IB_USER_VERBS_CMD_GET_CONTEXT)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_DEVICE)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_PORT)
	    | BIT_ULL(IB_USER_VERBS_CMD_ALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEALLOC_PD)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_SRQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SRQ_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_QP)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_SEND)
	    | BIT_ULL(IB_USER_VERBS_CMD_POST_RECV)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_RESIZE_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_POLL_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_PEEK_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REQ_NOTIFY_CQ)
	    | BIT_ULL(IB_USER_VERBS_CMD_REG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_DEREG_MR)
	    | BIT_ULL(IB_USER_VERBS_CMD_CREATE_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_MODIFY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_QUERY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_DESTROY_AH)
	    | BIT_ULL(IB_USER_VERBS_CMD_ATTACH_MCAST)
	    | BIT_ULL(IB_USER_VERBS_CMD_DETACH_MCAST)
	    ;

	ib_set_device_ops(dev, &rxe_dev_ops);

	tfm = crypto_alloc_shash("crc32", 0, 0);
	if (IS_ERR(tfm)) {
		pr_err("failed to allocate crc algorithm err:%ld\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	rxe->tfm = tfm;

	rdma_set_device_sysfs_group(dev, &rxe_attr_group);
	dev->driver_id = RDMA_DRIVER_RXE;
	err = ib_register_device(dev, "rxe%d", NULL);
	if (err) {
		pr_warn("%s failed with error %d\n", __func__, err);
		goto err1;
	}

	return 0;

err1:
	crypto_free_shash(rxe->tfm);

	return err;
}

void rxe_unregister_device(struct rxe_dev *rxe)
{
	struct ib_device *dev = &rxe->ib_dev;

	ib_unregister_device(dev);
}