// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/xarray.h>
#include <net/addrconf.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/uverbs_ioctl.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"
21
static int ib_qp_state_to_siw_qp_state[IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = SIW_QP_STATE_IDLE,
	[IB_QPS_INIT] = SIW_QP_STATE_IDLE,
	[IB_QPS_RTR] = SIW_QP_STATE_RTR,
	[IB_QPS_RTS] = SIW_QP_STATE_RTS,
	[IB_QPS_SQD] = SIW_QP_STATE_CLOSING,
	[IB_QPS_SQE] = SIW_QP_STATE_TERMINATE,
	[IB_QPS_ERR] = SIW_QP_STATE_ERROR
};

static char ib_qp_state_to_string[IB_QPS_ERR + 1][sizeof("RESET")] = {
	[IB_QPS_RESET] = "RESET", [IB_QPS_INIT] = "INIT", [IB_QPS_RTR] = "RTR",
	[IB_QPS_RTS] = "RTS", [IB_QPS_SQD] = "SQD", [IB_QPS_SQE] = "SQE",
	[IB_QPS_ERR] = "ERR"
};
37
void siw_mmap_free(struct rdma_user_mmap_entry *rdma_entry)
{
	struct siw_user_mmap_entry *entry = to_siw_mmap_entry(rdma_entry);

	kfree(entry);
}
44
int siw_mmap(struct ib_ucontext *ctx, struct vm_area_struct *vma)
{
	struct siw_ucontext *uctx = to_siw_ctx(ctx);
	size_t size = vma->vm_end - vma->vm_start;
	struct rdma_user_mmap_entry *rdma_entry;
	struct siw_user_mmap_entry *entry;
	int rv = -EINVAL;

	/*
	 * Must be page aligned
	 */
	if (vma->vm_start & (PAGE_SIZE - 1)) {
		pr_warn("siw: mmap not page aligned\n");
		return -EINVAL;
	}
	rdma_entry = rdma_user_mmap_entry_get(&uctx->base_ucontext, vma);
	if (!rdma_entry) {
		siw_dbg(&uctx->sdev->base_dev, "mmap lookup failed: %lu, %#zx\n",
			vma->vm_pgoff, size);
		return -EINVAL;
	}
	entry = to_siw_mmap_entry(rdma_entry);

	rv = remap_vmalloc_range(vma, entry->address, 0);
	if (rv) {
		pr_warn("remap_vmalloc_range failed: %lu, %zu\n", vma->vm_pgoff,
			size);
		goto out;
	}
out:
	rdma_user_mmap_entry_put(rdma_entry);

	return rv;
}
79
80int siw_alloc_ucontext(struct ib_ucontext *base_ctx, struct ib_udata *udata)
81{
82 struct siw_device *sdev = to_siw_dev(base_ctx->device);
83 struct siw_ucontext *ctx = to_siw_ctx(base_ctx);
84 struct siw_uresp_alloc_ctx uresp = {};
85 int rv;
86
87 if (atomic_inc_return(&sdev->num_ctx) > SIW_MAX_CONTEXT) {
88 rv = -ENOMEM;
89 goto err_out;
90 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +020091 ctx->sdev = sdev;
92
93 uresp.dev_id = sdev->vendor_part_id;
94
95 if (udata->outlen < sizeof(uresp)) {
96 rv = -EINVAL;
97 goto err_out;
98 }
99 rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
100 if (rv)
101 goto err_out;
102
103 siw_dbg(base_ctx->device, "success. now %d context(s)\n",
104 atomic_read(&sdev->num_ctx));
105
106 return 0;
107
108err_out:
109 atomic_dec(&sdev->num_ctx);
110 siw_dbg(base_ctx->device, "failure %d. now %d context(s)\n", rv,
111 atomic_read(&sdev->num_ctx));
112
113 return rv;
114}
115
116void siw_dealloc_ucontext(struct ib_ucontext *base_ctx)
117{
118 struct siw_ucontext *uctx = to_siw_ctx(base_ctx);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200119
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200120 atomic_dec(&uctx->sdev->num_ctx);
121}
122
123int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr,
124 struct ib_udata *udata)
125{
126 struct siw_device *sdev = to_siw_dev(base_dev);
127
128 if (udata->inlen || udata->outlen)
129 return -EINVAL;
130
131 memset(attr, 0, sizeof(*attr));
132
133 /* Revisit atomic caps if RFC 7306 gets supported */
134 attr->atomic_cap = 0;
135 attr->device_cap_flags =
136 IB_DEVICE_MEM_MGT_EXTENSIONS | IB_DEVICE_ALLOW_USER_UNREG;
137 attr->max_cq = sdev->attrs.max_cq;
138 attr->max_cqe = sdev->attrs.max_cqe;
139 attr->max_fast_reg_page_list_len = SIW_MAX_SGE_PBL;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200140 attr->max_mr = sdev->attrs.max_mr;
141 attr->max_mw = sdev->attrs.max_mw;
142 attr->max_mr_size = ~0ull;
143 attr->max_pd = sdev->attrs.max_pd;
144 attr->max_qp = sdev->attrs.max_qp;
145 attr->max_qp_init_rd_atom = sdev->attrs.max_ird;
146 attr->max_qp_rd_atom = sdev->attrs.max_ord;
147 attr->max_qp_wr = sdev->attrs.max_qp_wr;
148 attr->max_recv_sge = sdev->attrs.max_sge;
149 attr->max_res_rd_atom = sdev->attrs.max_qp * sdev->attrs.max_ird;
150 attr->max_send_sge = sdev->attrs.max_sge;
151 attr->max_sge_rd = sdev->attrs.max_sge_rd;
152 attr->max_srq = sdev->attrs.max_srq;
153 attr->max_srq_sge = sdev->attrs.max_srq_sge;
154 attr->max_srq_wr = sdev->attrs.max_srq_wr;
155 attr->page_size_cap = PAGE_SIZE;
156 attr->vendor_id = SIW_VENDOR_ID;
157 attr->vendor_part_id = sdev->vendor_part_id;
158
Kamal Heib0abfc792021-11-24 12:23:36 +0200159 addrconf_addr_eui48((u8 *)&attr->sys_image_guid,
160 sdev->netdev->dev_addr);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200161
162 return 0;
163}
164
Mark Bloch1fb7f892021-03-01 09:04:20 +0200165int siw_query_port(struct ib_device *base_dev, u32 port,
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200166 struct ib_port_attr *attr)
167{
168 struct siw_device *sdev = to_siw_dev(base_dev);
Kamal Heib25baba22020-02-18 11:59:11 +0200169 int rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200170
171 memset(attr, 0, sizeof(*attr));
172
Kamal Heib25baba22020-02-18 11:59:11 +0200173 rv = ib_get_eth_speed(base_dev, port, &attr->active_speed,
174 &attr->active_width);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200175 attr->gid_tbl_len = 1;
176 attr->max_msg_sz = -1;
177 attr->max_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
Kamal Heibbeb205d2020-02-05 10:13:54 +0200178 attr->active_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
Kamal Heib72a77202019-08-07 13:31:35 +0300179 attr->phys_state = sdev->state == IB_PORT_ACTIVE ?
180 IB_PORT_PHYS_STATE_LINK_UP : IB_PORT_PHYS_STATE_DISABLED;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200181 attr->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_DEVICE_MGMT_SUP;
182 attr->state = sdev->state;
	/*
	 * All zero
	 *
	 * attr->lid = 0;
	 * attr->bad_pkey_cntr = 0;
	 * attr->qkey_viol_cntr = 0;
	 * attr->sm_lid = 0;
	 * attr->lmc = 0;
	 * attr->max_vl_num = 0;
	 * attr->sm_sl = 0;
	 * attr->subnet_timeout = 0;
	 * attr->init_type_reply = 0;
	 */
Kamal Heib25baba22020-02-18 11:59:11 +0200196 return rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200197}
198
Mark Bloch1fb7f892021-03-01 09:04:20 +0200199int siw_get_port_immutable(struct ib_device *base_dev, u32 port,
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200200 struct ib_port_immutable *port_immutable)
201{
202 struct ib_port_attr attr;
203 int rv = siw_query_port(base_dev, port, &attr);
204
205 if (rv)
206 return rv;
207
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200208 port_immutable->gid_tbl_len = attr.gid_tbl_len;
209 port_immutable->core_cap_flags = RDMA_CORE_PORT_IWARP;
210
211 return 0;
212}
213
Mark Bloch1fb7f892021-03-01 09:04:20 +0200214int siw_query_gid(struct ib_device *base_dev, u32 port, int idx,
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200215 union ib_gid *gid)
216{
217 struct siw_device *sdev = to_siw_dev(base_dev);
218
219 /* subnet_prefix == interface_id == 0; */
220 memset(gid, 0, sizeof(*gid));
221 memcpy(&gid->raw[0], sdev->netdev->dev_addr, 6);
222
223 return 0;
224}
225
226int siw_alloc_pd(struct ib_pd *pd, struct ib_udata *udata)
227{
228 struct siw_device *sdev = to_siw_dev(pd->device);
229
230 if (atomic_inc_return(&sdev->num_pd) > SIW_MAX_PD) {
231 atomic_dec(&sdev->num_pd);
232 return -ENOMEM;
233 }
	siw_dbg_pd(pd, "now %d PD(s)\n", atomic_read(&sdev->num_pd));
235
236 return 0;
237}
238
Leon Romanovsky91a7c582020-09-07 15:09:13 +0300239int siw_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200240{
241 struct siw_device *sdev = to_siw_dev(pd->device);
242
243 siw_dbg_pd(pd, "free PD\n");
244 atomic_dec(&sdev->num_pd);
Leon Romanovsky91a7c582020-09-07 15:09:13 +0300245 return 0;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200246}
247
248void siw_qp_get_ref(struct ib_qp *base_qp)
249{
250 siw_qp_get(to_siw_qp(base_qp));
251}
252
253void siw_qp_put_ref(struct ib_qp *base_qp)
254{
255 siw_qp_put(to_siw_qp(base_qp));
256}
257
static struct rdma_user_mmap_entry *
siw_mmap_entry_insert(struct siw_ucontext *uctx,
		      void *address, size_t length,
		      u64 *offset)
{
	struct siw_user_mmap_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	int rv;

	*offset = SIW_INVAL_UOBJ_KEY;
	if (!entry)
		return NULL;

	entry->address = address;

	rv = rdma_user_mmap_entry_insert(&uctx->base_ucontext,
					 &entry->rdma_entry,
					 length);
	if (rv) {
		kfree(entry);
		return NULL;
	}

	*offset = rdma_user_mmap_get_offset(&entry->rdma_entry);

	return &entry->rdma_entry;
}
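
/*
 * Illustrative userland usage (assumption, not part of this file):
 * the offset returned in *offset / the uresp.*_key fields is handed
 * back to user space, which passes it as the mmap() offset on the
 * ucontext fd, e.g.
 *
 *	sq = mmap(NULL, num_sqe * sizeof(struct siw_sqe),
 *		  PROT_READ | PROT_WRITE, MAP_SHARED, ctx_fd, sq_key);
 *
 * and that call is routed back into siw_mmap() above through the
 * rdma_user_mmap_entry machinery.
 */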
284
/*
 * siw_create_qp()
 *
 * Create QP of requested size on given device.
 *
 * @qp: Queue pair
 * @attrs: Initial QP attributes.
 * @udata: used to provide QP ID, SQ and RQ size back to user.
 */
294
Leon Romanovsky514aee62021-07-23 14:39:50 +0300295int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs,
296 struct ib_udata *udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200297{
Leon Romanovsky514aee62021-07-23 14:39:50 +0300298 struct ib_pd *pd = ibqp->pd;
299 struct siw_qp *qp = to_siw_qp(ibqp);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200300 struct ib_device *base_dev = pd->device;
301 struct siw_device *sdev = to_siw_dev(base_dev);
302 struct siw_ucontext *uctx =
303 rdma_udata_to_drv_context(udata, struct siw_ucontext,
304 base_ucontext);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200305 unsigned long flags;
306 int num_sqe, num_rqe, rv = 0;
Michal Kalderon11f1a752019-10-30 11:44:14 +0200307 size_t length;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200308
309 siw_dbg(base_dev, "create new QP\n");
310
Jason Gunthorpe1f11a762020-10-03 20:20:08 -0300311 if (attrs->create_flags)
Leon Romanovsky514aee62021-07-23 14:39:50 +0300312 return -EOPNOTSUPP;
Jason Gunthorpe1f11a762020-10-03 20:20:08 -0300313
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200314 if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) {
315 siw_dbg(base_dev, "too many QP's\n");
Leon Romanovsky514aee62021-07-23 14:39:50 +0300316 return -ENOMEM;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200317 }
318 if (attrs->qp_type != IB_QPT_RC) {
319 siw_dbg(base_dev, "only RC QP's supported\n");
Kamal Heibbb8865f2020-01-30 10:20:49 +0200320 rv = -EOPNOTSUPP;
Leon Romanovsky514aee62021-07-23 14:39:50 +0300321 goto err_atomic;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200322 }
323 if ((attrs->cap.max_send_wr > SIW_MAX_QP_WR) ||
324 (attrs->cap.max_recv_wr > SIW_MAX_QP_WR) ||
325 (attrs->cap.max_send_sge > SIW_MAX_SGE) ||
326 (attrs->cap.max_recv_sge > SIW_MAX_SGE)) {
327 siw_dbg(base_dev, "QP size error\n");
328 rv = -EINVAL;
Leon Romanovsky514aee62021-07-23 14:39:50 +0300329 goto err_atomic;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200330 }
331 if (attrs->cap.max_inline_data > SIW_MAX_INLINE) {
332 siw_dbg(base_dev, "max inline send: %d > %d\n",
333 attrs->cap.max_inline_data, (int)SIW_MAX_INLINE);
334 rv = -EINVAL;
Leon Romanovsky514aee62021-07-23 14:39:50 +0300335 goto err_atomic;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200336 }
	/*
	 * NOTE: zero-element SGLs are allowed for SQ and RQ WQEs,
	 * but a QP must be able to hold at least one WQE (SQ + RQ)
	 */
341 if (attrs->cap.max_send_wr + attrs->cap.max_recv_wr == 0) {
342 siw_dbg(base_dev, "QP must have send or receive queue\n");
343 rv = -EINVAL;
Leon Romanovsky514aee62021-07-23 14:39:50 +0300344 goto err_atomic;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200345 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200346
Leon Romanovskya5688142021-05-09 14:39:21 +0300347 if (!attrs->send_cq || (!attrs->recv_cq && !attrs->srq)) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200348 siw_dbg(base_dev, "send CQ or receive CQ invalid\n");
349 rv = -EINVAL;
Leon Romanovsky514aee62021-07-23 14:39:50 +0300350 goto err_atomic;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200351 }
Leon Romanovsky514aee62021-07-23 14:39:50 +0300352
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200353 init_rwsem(&qp->state_lock);
354 spin_lock_init(&qp->sq_lock);
355 spin_lock_init(&qp->rq_lock);
356 spin_lock_init(&qp->orq_lock);
357
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200358 rv = siw_qp_add(sdev, qp);
359 if (rv)
Leon Romanovsky514aee62021-07-23 14:39:50 +0300360 goto err_atomic;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200361
Bernard Metzler661f3852021-01-08 13:58:45 +0100362 num_sqe = attrs->cap.max_send_wr;
363 num_rqe = attrs->cap.max_recv_wr;
364
	/* All queue indices are derived from modulo operations
	 * on a free-running 'get' (consumer) and 'put' (producer)
	 * unsigned counter. Having queue sizes at a power of two
	 * avoids handling counter wrap around.
	 */
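	/*
	 * Illustrative note (not driver logic): with sq_size = 8, a
	 * free-running put counter of 9 selects slot 9 % 8 == 1, which
	 * for a power-of-two size reduces to (9 & 7); unsigned wrap of
	 * the counters therefore needs no special casing.
	 */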
Bernard Metzler661f3852021-01-08 13:58:45 +0100370 if (num_sqe)
371 num_sqe = roundup_pow_of_two(num_sqe);
372 else {
373 /* Zero sized SQ is not supported */
374 rv = -EINVAL;
Leon Romanovskya3d83272021-05-09 14:41:38 +0300375 goto err_out_xa;
Bernard Metzler661f3852021-01-08 13:58:45 +0100376 }
377 if (num_rqe)
378 num_rqe = roundup_pow_of_two(num_rqe);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200379
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100380 if (udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200381 qp->sendq = vmalloc_user(num_sqe * sizeof(struct siw_sqe));
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100382 else
383 qp->sendq = vzalloc(num_sqe * sizeof(struct siw_sqe));
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200384
385 if (qp->sendq == NULL) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200386 rv = -ENOMEM;
387 goto err_out_xa;
388 }
389 if (attrs->sq_sig_type != IB_SIGNAL_REQ_WR) {
390 if (attrs->sq_sig_type == IB_SIGNAL_ALL_WR)
391 qp->attrs.flags |= SIW_SIGNAL_ALL_WR;
392 else {
393 rv = -EINVAL;
394 goto err_out_xa;
395 }
396 }
397 qp->pd = pd;
Leon Romanovskya5688142021-05-09 14:39:21 +0300398 qp->scq = to_siw_cq(attrs->send_cq);
399 qp->rcq = to_siw_cq(attrs->recv_cq);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200400
401 if (attrs->srq) {
402 /*
403 * SRQ support.
404 * Verbs 6.3.7: ignore RQ size, if SRQ present
405 * Verbs 6.3.5: do not check PD of SRQ against PD of QP
406 */
407 qp->srq = to_siw_srq(attrs->srq);
408 qp->attrs.rq_size = 0;
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100409 siw_dbg(base_dev, "QP [%u]: SRQ attached\n",
410 qp->base_qp.qp_num);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200411 } else if (num_rqe) {
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100412 if (udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200413 qp->recvq =
414 vmalloc_user(num_rqe * sizeof(struct siw_rqe));
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100415 else
416 qp->recvq = vzalloc(num_rqe * sizeof(struct siw_rqe));
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200417
418 if (qp->recvq == NULL) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200419 rv = -ENOMEM;
420 goto err_out_xa;
421 }
422 qp->attrs.rq_size = num_rqe;
423 }
424 qp->attrs.sq_size = num_sqe;
425 qp->attrs.sq_max_sges = attrs->cap.max_send_sge;
426 qp->attrs.rq_max_sges = attrs->cap.max_recv_sge;
427
428 /* Make those two tunables fixed for now. */
429 qp->tx_ctx.gso_seg_limit = 1;
430 qp->tx_ctx.zcopy_tx = zcopy_tx;
431
432 qp->attrs.state = SIW_QP_STATE_IDLE;
433
434 if (udata) {
435 struct siw_uresp_create_qp uresp = {};
436
437 uresp.num_sqe = num_sqe;
438 uresp.num_rqe = num_rqe;
439 uresp.qp_id = qp_id(qp);
440
441 if (qp->sendq) {
Michal Kalderon11f1a752019-10-30 11:44:14 +0200442 length = num_sqe * sizeof(struct siw_sqe);
443 qp->sq_entry =
444 siw_mmap_entry_insert(uctx, qp->sendq,
445 length, &uresp.sq_key);
446 if (!qp->sq_entry) {
447 rv = -ENOMEM;
448 goto err_out_xa;
449 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200450 }
Michal Kalderon11f1a752019-10-30 11:44:14 +0200451
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200452 if (qp->recvq) {
Michal Kalderon11f1a752019-10-30 11:44:14 +0200453 length = num_rqe * sizeof(struct siw_rqe);
454 qp->rq_entry =
455 siw_mmap_entry_insert(uctx, qp->recvq,
456 length, &uresp.rq_key);
457 if (!qp->rq_entry) {
458 uresp.sq_key = SIW_INVAL_UOBJ_KEY;
459 rv = -ENOMEM;
460 goto err_out_xa;
461 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200462 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200463
464 if (udata->outlen < sizeof(uresp)) {
465 rv = -EINVAL;
466 goto err_out_xa;
467 }
468 rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
469 if (rv)
470 goto err_out_xa;
471 }
472 qp->tx_cpu = siw_get_tx_cpu(sdev);
473 if (qp->tx_cpu < 0) {
474 rv = -EINVAL;
475 goto err_out_xa;
476 }
477 INIT_LIST_HEAD(&qp->devq);
478 spin_lock_irqsave(&sdev->lock, flags);
479 list_add_tail(&qp->devq, &sdev->qp_list);
480 spin_unlock_irqrestore(&sdev->lock, flags);
481
Leon Romanovsky514aee62021-07-23 14:39:50 +0300482 return 0;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200483
484err_out_xa:
485 xa_erase(&sdev->qp_xa, qp_id(qp));
Leon Romanovsky514aee62021-07-23 14:39:50 +0300486 if (uctx) {
487 rdma_user_mmap_entry_remove(qp->sq_entry);
488 rdma_user_mmap_entry_remove(qp->rq_entry);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200489 }
Leon Romanovsky514aee62021-07-23 14:39:50 +0300490 vfree(qp->sendq);
491 vfree(qp->recvq);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200492
Leon Romanovsky514aee62021-07-23 14:39:50 +0300493err_atomic:
494 atomic_dec(&sdev->num_qp);
495 return rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200496}
497
498/*
499 * Minimum siw_query_qp() verb interface.
500 *
501 * @qp_attr_mask is not used but all available information is provided
502 */
503int siw_query_qp(struct ib_qp *base_qp, struct ib_qp_attr *qp_attr,
504 int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
505{
506 struct siw_qp *qp;
507 struct siw_device *sdev;
508
509 if (base_qp && qp_attr && qp_init_attr) {
510 qp = to_siw_qp(base_qp);
511 sdev = to_siw_dev(base_qp->device);
512 } else {
513 return -EINVAL;
514 }
515 qp_attr->cap.max_inline_data = SIW_MAX_INLINE;
516 qp_attr->cap.max_send_wr = qp->attrs.sq_size;
517 qp_attr->cap.max_send_sge = qp->attrs.sq_max_sges;
518 qp_attr->cap.max_recv_wr = qp->attrs.rq_size;
519 qp_attr->cap.max_recv_sge = qp->attrs.rq_max_sges;
520 qp_attr->path_mtu = ib_mtu_int_to_enum(sdev->netdev->mtu);
521 qp_attr->max_rd_atomic = qp->attrs.irq_size;
522 qp_attr->max_dest_rd_atomic = qp->attrs.orq_size;
523
524 qp_attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE |
525 IB_ACCESS_REMOTE_WRITE |
526 IB_ACCESS_REMOTE_READ;
527
528 qp_init_attr->qp_type = base_qp->qp_type;
529 qp_init_attr->send_cq = base_qp->send_cq;
530 qp_init_attr->recv_cq = base_qp->recv_cq;
531 qp_init_attr->srq = base_qp->srq;
532
533 qp_init_attr->cap = qp_attr->cap;
534
535 return 0;
536}
537
538int siw_verbs_modify_qp(struct ib_qp *base_qp, struct ib_qp_attr *attr,
539 int attr_mask, struct ib_udata *udata)
540{
541 struct siw_qp_attrs new_attrs;
542 enum siw_qp_attr_mask siw_attr_mask = 0;
543 struct siw_qp *qp = to_siw_qp(base_qp);
544 int rv = 0;
545
546 if (!attr_mask)
547 return 0;
548
Jason Gunthorpe26e990b2020-10-03 20:20:06 -0300549 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
550 return -EOPNOTSUPP;
551
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200552 memset(&new_attrs, 0, sizeof(new_attrs));
553
554 if (attr_mask & IB_QP_ACCESS_FLAGS) {
555 siw_attr_mask = SIW_QP_ATTR_ACCESS_FLAGS;
556
557 if (attr->qp_access_flags & IB_ACCESS_REMOTE_READ)
558 new_attrs.flags |= SIW_RDMA_READ_ENABLED;
559 if (attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE)
560 new_attrs.flags |= SIW_RDMA_WRITE_ENABLED;
561 if (attr->qp_access_flags & IB_ACCESS_MW_BIND)
562 new_attrs.flags |= SIW_RDMA_BIND_ENABLED;
563 }
564 if (attr_mask & IB_QP_STATE) {
565 siw_dbg_qp(qp, "desired IB QP state: %s\n",
566 ib_qp_state_to_string[attr->qp_state]);
567
568 new_attrs.state = ib_qp_state_to_siw_qp_state[attr->qp_state];
569
570 if (new_attrs.state > SIW_QP_STATE_RTS)
571 qp->tx_ctx.tx_suspend = 1;
572
573 siw_attr_mask |= SIW_QP_ATTR_STATE;
574 }
575 if (!siw_attr_mask)
576 goto out;
577
578 down_write(&qp->state_lock);
579
580 rv = siw_qp_modify(qp, &new_attrs, siw_attr_mask);
581
582 up_write(&qp->state_lock);
583out:
584 return rv;
585}
586
587int siw_destroy_qp(struct ib_qp *base_qp, struct ib_udata *udata)
588{
589 struct siw_qp *qp = to_siw_qp(base_qp);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200590 struct siw_ucontext *uctx =
591 rdma_udata_to_drv_context(udata, struct siw_ucontext,
592 base_ucontext);
593 struct siw_qp_attrs qp_attrs;
594
Bernard Metzlerc5362772019-08-22 19:37:38 +0200595 siw_dbg_qp(qp, "state %d\n", qp->attrs.state);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200596
	/*
	 * Mark QP as in process of destruction to prevent
	 * any async callbacks to the RDMA core
	 */
601 qp->attrs.flags |= SIW_QP_IN_DESTROY;
602 qp->rx_stream.rx_suspend = 1;
603
Michal Kalderon11f1a752019-10-30 11:44:14 +0200604 if (uctx) {
605 rdma_user_mmap_entry_remove(qp->sq_entry);
606 rdma_user_mmap_entry_remove(qp->rq_entry);
607 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200608
609 down_write(&qp->state_lock);
610
611 qp_attrs.state = SIW_QP_STATE_ERROR;
612 siw_qp_modify(qp, &qp_attrs, SIW_QP_ATTR_STATE);
613
614 if (qp->cep) {
615 siw_cep_put(qp->cep);
616 qp->cep = NULL;
617 }
618 up_write(&qp->state_lock);
619
620 kfree(qp->tx_ctx.mpa_crc_hd);
621 kfree(qp->rx_stream.mpa_crc_hd);
622
623 qp->scq = qp->rcq = NULL;
624
625 siw_qp_put(qp);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200626
627 return 0;
628}
629
/*
 * siw_copy_inline_sgl()
 *
 * Prepare sgl of inlined data for sending. For userland callers,
 * the function checks that the given buffer addresses and lengths
 * are within process context bounds.
 * Data from all provided sge's is copied together into the wqe,
 * referenced by a single sge.
 */
639static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr,
640 struct siw_sqe *sqe)
641{
642 struct ib_sge *core_sge = core_wr->sg_list;
643 void *kbuf = &sqe->sge[1];
644 int num_sge = core_wr->num_sge, bytes = 0;
645
Bernard Metzlerc5362772019-08-22 19:37:38 +0200646 sqe->sge[0].laddr = (uintptr_t)kbuf;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200647 sqe->sge[0].lkey = 0;
648
649 while (num_sge--) {
650 if (!core_sge->length) {
651 core_sge++;
652 continue;
653 }
654 bytes += core_sge->length;
655 if (bytes > SIW_MAX_INLINE) {
656 bytes = -EINVAL;
657 break;
658 }
659 memcpy(kbuf, (void *)(uintptr_t)core_sge->addr,
660 core_sge->length);
661
662 kbuf += core_sge->length;
663 core_sge++;
664 }
Jiapeng Chong76937fa52021-12-02 18:07:59 +0800665 sqe->sge[0].length = max(bytes, 0);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200666 sqe->num_sge = bytes > 0 ? 1 : 0;
667
668 return bytes;
669}
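
/*
 * Illustrative example for siw_copy_inline_sgl() (assumption, values
 * made up): two SGEs of 16 and 32 bytes are copied back-to-back into
 * the space starting at &sqe->sge[1], and sqe->sge[0] then describes
 * the combined 48-byte inline buffer with lkey 0.
 */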
670
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200671/* Complete SQ WR's without processing */
672static int siw_sq_flush_wr(struct siw_qp *qp, const struct ib_send_wr *wr,
673 const struct ib_send_wr **bad_wr)
674{
675 struct siw_sqe sqe = {};
676 int rv = 0;
677
678 while (wr) {
679 sqe.id = wr->wr_id;
680 sqe.opcode = wr->opcode;
681 rv = siw_sqe_complete(qp, &sqe, 0, SIW_WC_WR_FLUSH_ERR);
682 if (rv) {
683 if (bad_wr)
684 *bad_wr = wr;
685 break;
686 }
687 wr = wr->next;
688 }
689 return rv;
690}
691
692/* Complete RQ WR's without processing */
693static int siw_rq_flush_wr(struct siw_qp *qp, const struct ib_recv_wr *wr,
694 const struct ib_recv_wr **bad_wr)
695{
696 struct siw_rqe rqe = {};
697 int rv = 0;
698
699 while (wr) {
700 rqe.id = wr->wr_id;
701 rv = siw_rqe_complete(qp, &rqe, 0, 0, SIW_WC_WR_FLUSH_ERR);
702 if (rv) {
703 if (bad_wr)
704 *bad_wr = wr;
705 break;
706 }
707 wr = wr->next;
708 }
709 return rv;
710}
711
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200712/*
713 * siw_post_send()
714 *
715 * Post a list of S-WR's to a SQ.
716 *
717 * @base_qp: Base QP contained in siw QP
718 * @wr: Null terminated list of user WR's
719 * @bad_wr: Points to failing WR in case of synchronous failure.
720 */
721int siw_post_send(struct ib_qp *base_qp, const struct ib_send_wr *wr,
722 const struct ib_send_wr **bad_wr)
723{
724 struct siw_qp *qp = to_siw_qp(base_qp);
725 struct siw_wqe *wqe = tx_wqe(qp);
726
727 unsigned long flags;
728 int rv = 0;
729
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100730 if (wr && !rdma_is_kernel_res(&qp->base_qp.res)) {
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200731 siw_dbg_qp(qp, "wr must be empty for user mapped sq\n");
732 *bad_wr = wr;
733 return -EINVAL;
734 }
735
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200736 /*
737 * Try to acquire QP state lock. Must be non-blocking
738 * to accommodate kernel clients needs.
739 */
740 if (!down_read_trylock(&qp->state_lock)) {
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200741 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
742 /*
743 * ERROR state is final, so we can be sure
744 * this state will not change as long as the QP
745 * exists.
746 *
747 * This handles an ib_drain_sq() call with
748 * a concurrent request to set the QP state
749 * to ERROR.
750 */
751 rv = siw_sq_flush_wr(qp, wr, bad_wr);
752 } else {
753 siw_dbg_qp(qp, "QP locked, state %d\n",
754 qp->attrs.state);
755 *bad_wr = wr;
756 rv = -ENOTCONN;
757 }
758 return rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200759 }
760 if (unlikely(qp->attrs.state != SIW_QP_STATE_RTS)) {
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200761 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. SQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_sq().
			 */
769 rv = siw_sq_flush_wr(qp, wr, bad_wr);
770 } else {
771 siw_dbg_qp(qp, "QP out of state %d\n",
772 qp->attrs.state);
773 *bad_wr = wr;
774 rv = -ENOTCONN;
775 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200776 up_read(&qp->state_lock);
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200777 return rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200778 }
779 spin_lock_irqsave(&qp->sq_lock, flags);
780
781 while (wr) {
782 u32 idx = qp->sq_put % qp->attrs.sq_size;
783 struct siw_sqe *sqe = &qp->sendq[idx];
784
785 if (sqe->flags) {
786 siw_dbg_qp(qp, "sq full\n");
787 rv = -ENOMEM;
788 break;
789 }
790 if (wr->num_sge > qp->attrs.sq_max_sges) {
791 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
792 rv = -EINVAL;
793 break;
794 }
795 sqe->id = wr->wr_id;
796
797 if ((wr->send_flags & IB_SEND_SIGNALED) ||
798 (qp->attrs.flags & SIW_SIGNAL_ALL_WR))
799 sqe->flags |= SIW_WQE_SIGNALLED;
800
801 if (wr->send_flags & IB_SEND_FENCE)
802 sqe->flags |= SIW_WQE_READ_FENCE;
803
804 switch (wr->opcode) {
805 case IB_WR_SEND:
806 case IB_WR_SEND_WITH_INV:
807 if (wr->send_flags & IB_SEND_SOLICITED)
808 sqe->flags |= SIW_WQE_SOLICITED;
809
810 if (!(wr->send_flags & IB_SEND_INLINE)) {
811 siw_copy_sgl(wr->sg_list, sqe->sge,
812 wr->num_sge);
813 sqe->num_sge = wr->num_sge;
814 } else {
815 rv = siw_copy_inline_sgl(wr, sqe);
816 if (rv <= 0) {
817 rv = -EINVAL;
818 break;
819 }
820 sqe->flags |= SIW_WQE_INLINE;
821 sqe->num_sge = 1;
822 }
823 if (wr->opcode == IB_WR_SEND)
824 sqe->opcode = SIW_OP_SEND;
825 else {
826 sqe->opcode = SIW_OP_SEND_REMOTE_INV;
827 sqe->rkey = wr->ex.invalidate_rkey;
828 }
829 break;
830
831 case IB_WR_RDMA_READ_WITH_INV:
832 case IB_WR_RDMA_READ:
			/*
			 * iWarp restricts RREAD sink to SGL containing
			 * 1 SGE only. We could relax to SGL with multiple
			 * elements referring to the SAME ltag or even sending
			 * a private per-rreq tag referring to a checked
			 * local sgl with MULTIPLE ltag's.
			 */
840 if (unlikely(wr->num_sge != 1)) {
841 rv = -EINVAL;
842 break;
843 }
844 siw_copy_sgl(wr->sg_list, &sqe->sge[0], 1);
845 /*
846 * NOTE: zero length RREAD is allowed!
847 */
848 sqe->raddr = rdma_wr(wr)->remote_addr;
849 sqe->rkey = rdma_wr(wr)->rkey;
850 sqe->num_sge = 1;
851
852 if (wr->opcode == IB_WR_RDMA_READ)
853 sqe->opcode = SIW_OP_READ;
854 else
855 sqe->opcode = SIW_OP_READ_LOCAL_INV;
856 break;
857
858 case IB_WR_RDMA_WRITE:
859 if (!(wr->send_flags & IB_SEND_INLINE)) {
860 siw_copy_sgl(wr->sg_list, &sqe->sge[0],
861 wr->num_sge);
862 sqe->num_sge = wr->num_sge;
863 } else {
864 rv = siw_copy_inline_sgl(wr, sqe);
865 if (unlikely(rv < 0)) {
866 rv = -EINVAL;
867 break;
868 }
869 sqe->flags |= SIW_WQE_INLINE;
870 sqe->num_sge = 1;
871 }
872 sqe->raddr = rdma_wr(wr)->remote_addr;
873 sqe->rkey = rdma_wr(wr)->rkey;
874 sqe->opcode = SIW_OP_WRITE;
875 break;
876
877 case IB_WR_REG_MR:
Bernard Metzlerc5362772019-08-22 19:37:38 +0200878 sqe->base_mr = (uintptr_t)reg_wr(wr)->mr;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200879 sqe->rkey = reg_wr(wr)->key;
880 sqe->access = reg_wr(wr)->access & IWARP_ACCESS_MASK;
881 sqe->opcode = SIW_OP_REG_MR;
882 break;
883
884 case IB_WR_LOCAL_INV:
885 sqe->rkey = wr->ex.invalidate_rkey;
886 sqe->opcode = SIW_OP_INVAL_STAG;
887 break;
888
889 default:
890 siw_dbg_qp(qp, "ib wr type %d unsupported\n",
891 wr->opcode);
892 rv = -EINVAL;
893 break;
894 }
Bernard Metzlerc5362772019-08-22 19:37:38 +0200895 siw_dbg_qp(qp, "opcode %d, flags 0x%x, wr_id 0x%pK\n",
896 sqe->opcode, sqe->flags,
897 (void *)(uintptr_t)sqe->id);
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200898
899 if (unlikely(rv < 0))
900 break;
901
902 /* make SQE only valid after completely written */
903 smp_wmb();
904 sqe->flags |= SIW_WQE_VALID;
905
906 qp->sq_put++;
907 wr = wr->next;
908 }
909
	/*
	 * Send directly if SQ processing is not in progress.
	 * Eventual immediate errors (rv < 0) do not affect the involved
	 * RI resources (Verbs, 8.3.1) and thus do not prevent SQ
	 * processing, if new work is already pending. But rv must be
	 * passed back to the caller.
	 */
917 if (wqe->wr_status != SIW_WR_IDLE) {
918 spin_unlock_irqrestore(&qp->sq_lock, flags);
919 goto skip_direct_sending;
920 }
921 rv = siw_activate_tx(qp);
922 spin_unlock_irqrestore(&qp->sq_lock, flags);
923
924 if (rv <= 0)
925 goto skip_direct_sending;
926
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100927 if (rdma_is_kernel_res(&qp->base_qp.res)) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200928 rv = siw_sq_start(qp);
929 } else {
930 qp->tx_ctx.in_syscall = 1;
931
932 if (siw_qp_sq_process(qp) != 0 && !(qp->tx_ctx.tx_suspend))
933 siw_qp_cm_drop(qp, 0);
934
935 qp->tx_ctx.in_syscall = 0;
936 }
937skip_direct_sending:
938
939 up_read(&qp->state_lock);
940
941 if (rv >= 0)
942 return 0;
943 /*
944 * Immediate error
945 */
946 siw_dbg_qp(qp, "error %d\n", rv);
947
948 *bad_wr = wr;
949 return rv;
950}
951
952/*
953 * siw_post_receive()
954 *
955 * Post a list of R-WR's to a RQ.
956 *
957 * @base_qp: Base QP contained in siw QP
958 * @wr: Null terminated list of user WR's
959 * @bad_wr: Points to failing WR in case of synchronous failure.
960 */
961int siw_post_receive(struct ib_qp *base_qp, const struct ib_recv_wr *wr,
962 const struct ib_recv_wr **bad_wr)
963{
964 struct siw_qp *qp = to_siw_qp(base_qp);
965 unsigned long flags;
966 int rv = 0;
967
Bernard Metzler661f3852021-01-08 13:58:45 +0100968 if (qp->srq || qp->attrs.rq_size == 0) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200969 *bad_wr = wr;
Bernard Metzler661f3852021-01-08 13:58:45 +0100970 return -EINVAL;
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200971 }
Bernard Metzler58fb0b52019-12-10 17:17:29 +0100972 if (!rdma_is_kernel_res(&qp->base_qp.res)) {
973 siw_dbg_qp(qp, "no kernel post_recv for user mapped rq\n");
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200974 *bad_wr = wr;
975 return -EINVAL;
976 }
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200977
Bernard Metzler303ae1c2019-06-20 18:21:27 +0200978 /*
979 * Try to acquire QP state lock. Must be non-blocking
980 * to accommodate kernel clients needs.
981 */
982 if (!down_read_trylock(&qp->state_lock)) {
Bernard Metzlercf049bb2019-10-04 14:53:56 +0200983 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
984 /*
985 * ERROR state is final, so we can be sure
986 * this state will not change as long as the QP
987 * exists.
988 *
989 * This handles an ib_drain_rq() call with
990 * a concurrent request to set the QP state
991 * to ERROR.
992 */
993 rv = siw_rq_flush_wr(qp, wr, bad_wr);
994 } else {
995 siw_dbg_qp(qp, "QP locked, state %d\n",
996 qp->attrs.state);
997 *bad_wr = wr;
998 rv = -ENOTCONN;
999 }
1000 return rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001001 }
1002 if (qp->attrs.state > SIW_QP_STATE_RTS) {
Bernard Metzlercf049bb2019-10-04 14:53:56 +02001003 if (qp->attrs.state == SIW_QP_STATE_ERROR) {
			/*
			 * Immediately flush this WR to CQ, if QP
			 * is in ERROR state. RQ is guaranteed to
			 * be empty, so WR completes in-order.
			 *
			 * Typically triggered by ib_drain_rq().
			 */
1011 rv = siw_rq_flush_wr(qp, wr, bad_wr);
1012 } else {
1013 siw_dbg_qp(qp, "QP out of state %d\n",
1014 qp->attrs.state);
1015 *bad_wr = wr;
1016 rv = -ENOTCONN;
1017 }
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001018 up_read(&qp->state_lock);
Bernard Metzlercf049bb2019-10-04 14:53:56 +02001019 return rv;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001020 }
1021 /*
1022 * Serialize potentially multiple producers.
1023 * Not needed for single threaded consumer side.
1024 */
1025 spin_lock_irqsave(&qp->rq_lock, flags);
1026
1027 while (wr) {
1028 u32 idx = qp->rq_put % qp->attrs.rq_size;
1029 struct siw_rqe *rqe = &qp->recvq[idx];
1030
1031 if (rqe->flags) {
1032 siw_dbg_qp(qp, "RQ full\n");
1033 rv = -ENOMEM;
1034 break;
1035 }
1036 if (wr->num_sge > qp->attrs.rq_max_sges) {
1037 siw_dbg_qp(qp, "too many sge's: %d\n", wr->num_sge);
1038 rv = -EINVAL;
1039 break;
1040 }
1041 rqe->id = wr->wr_id;
1042 rqe->num_sge = wr->num_sge;
1043 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
1044
1045 /* make sure RQE is completely written before valid */
1046 smp_wmb();
1047
1048 rqe->flags = SIW_WQE_VALID;
1049
1050 qp->rq_put++;
1051 wr = wr->next;
1052 }
1053 spin_unlock_irqrestore(&qp->rq_lock, flags);
1054
1055 up_read(&qp->state_lock);
1056
1057 if (rv < 0) {
1058 siw_dbg_qp(qp, "error %d\n", rv);
1059 *bad_wr = wr;
1060 }
1061 return rv > 0 ? 0 : rv;
1062}
1063
Leon Romanovsky43d781b2020-09-07 15:09:18 +03001064int siw_destroy_cq(struct ib_cq *base_cq, struct ib_udata *udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001065{
1066 struct siw_cq *cq = to_siw_cq(base_cq);
1067 struct siw_device *sdev = to_siw_dev(base_cq->device);
1068 struct siw_ucontext *ctx =
1069 rdma_udata_to_drv_context(udata, struct siw_ucontext,
1070 base_ucontext);
1071
1072 siw_dbg_cq(cq, "free CQ resources\n");
1073
1074 siw_cq_flush(cq);
1075
Michal Kalderon11f1a752019-10-30 11:44:14 +02001076 if (ctx)
1077 rdma_user_mmap_entry_remove(cq->cq_entry);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001078
1079 atomic_dec(&sdev->num_cq);
1080
1081 vfree(cq->queue);
Leon Romanovsky43d781b2020-09-07 15:09:18 +03001082 return 0;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001083}
1084
1085/*
1086 * siw_create_cq()
1087 *
1088 * Populate CQ of requested size
1089 *
1090 * @base_cq: CQ as allocated by RDMA midlayer
1091 * @attr: Initial CQ attributes
1092 * @udata: relates to user context
1093 */
1094
1095int siw_create_cq(struct ib_cq *base_cq, const struct ib_cq_init_attr *attr,
1096 struct ib_udata *udata)
1097{
1098 struct siw_device *sdev = to_siw_dev(base_cq->device);
1099 struct siw_cq *cq = to_siw_cq(base_cq);
1100 int rv, size = attr->cqe;
1101
Jason Gunthorpe1c407cb2020-10-03 20:20:07 -03001102 if (attr->flags)
1103 return -EOPNOTSUPP;
1104
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001105 if (atomic_inc_return(&sdev->num_cq) > SIW_MAX_CQ) {
1106 siw_dbg(base_cq->device, "too many CQ's\n");
1107 rv = -ENOMEM;
1108 goto err_out;
1109 }
1110 if (size < 1 || size > sdev->attrs.max_cqe) {
1111 siw_dbg(base_cq->device, "CQ size error: %d\n", size);
1112 rv = -EINVAL;
1113 goto err_out;
1114 }
1115 size = roundup_pow_of_two(size);
1116 cq->base_cq.cqe = size;
1117 cq->num_cqe = size;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001118
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001119 if (udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001120 cq->queue = vmalloc_user(size * sizeof(struct siw_cqe) +
1121 sizeof(struct siw_cq_ctrl));
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001122 else
1123 cq->queue = vzalloc(size * sizeof(struct siw_cqe) +
1124 sizeof(struct siw_cq_ctrl));
1125
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001126 if (cq->queue == NULL) {
1127 rv = -ENOMEM;
1128 goto err_out;
1129 }
1130 get_random_bytes(&cq->id, 4);
1131 siw_dbg(base_cq->device, "new CQ [%u]\n", cq->id);
1132
1133 spin_lock_init(&cq->lock);
1134
Bernard Metzler2c8ccb32019-08-09 17:18:16 +02001135 cq->notify = (struct siw_cq_ctrl *)&cq->queue[size];
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001136
1137 if (udata) {
1138 struct siw_uresp_create_cq uresp = {};
1139 struct siw_ucontext *ctx =
1140 rdma_udata_to_drv_context(udata, struct siw_ucontext,
1141 base_ucontext);
Michal Kalderon11f1a752019-10-30 11:44:14 +02001142 size_t length = size * sizeof(struct siw_cqe) +
1143 sizeof(struct siw_cq_ctrl);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001144
Michal Kalderon11f1a752019-10-30 11:44:14 +02001145 cq->cq_entry =
1146 siw_mmap_entry_insert(ctx, cq->queue,
1147 length, &uresp.cq_key);
1148 if (!cq->cq_entry) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001149 rv = -ENOMEM;
1150 goto err_out;
1151 }
Michal Kalderon11f1a752019-10-30 11:44:14 +02001152
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001153 uresp.cq_id = cq->id;
1154 uresp.num_cqe = size;
1155
1156 if (udata->outlen < sizeof(uresp)) {
1157 rv = -EINVAL;
1158 goto err_out;
1159 }
1160 rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1161 if (rv)
1162 goto err_out;
1163 }
1164 return 0;
1165
1166err_out:
1167 siw_dbg(base_cq->device, "CQ creation failed: %d", rv);
1168
1169 if (cq && cq->queue) {
1170 struct siw_ucontext *ctx =
1171 rdma_udata_to_drv_context(udata, struct siw_ucontext,
1172 base_ucontext);
Michal Kalderon11f1a752019-10-30 11:44:14 +02001173 if (ctx)
1174 rdma_user_mmap_entry_remove(cq->cq_entry);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001175 vfree(cq->queue);
1176 }
1177 atomic_dec(&sdev->num_cq);
1178
1179 return rv;
1180}
1181
1182/*
1183 * siw_poll_cq()
1184 *
1185 * Reap CQ entries if available and copy work completion status into
1186 * array of WC's provided by caller. Returns number of reaped CQE's.
1187 *
1188 * @base_cq: Base CQ contained in siw CQ.
1189 * @num_cqe: Maximum number of CQE's to reap.
1190 * @wc: Array of work completions to be filled by siw.
1191 */
1192int siw_poll_cq(struct ib_cq *base_cq, int num_cqe, struct ib_wc *wc)
1193{
1194 struct siw_cq *cq = to_siw_cq(base_cq);
1195 int i;
1196
1197 for (i = 0; i < num_cqe; i++) {
1198 if (!siw_reap_cqe(cq, wc))
1199 break;
1200 wc++;
1201 }
1202 return i;
1203}
1204
1205/*
1206 * siw_req_notify_cq()
1207 *
1208 * Request notification for new CQE's added to that CQ.
1209 * Defined flags:
1210 * o SIW_CQ_NOTIFY_SOLICITED lets siw trigger a notification
1211 * event if a WQE with notification flag set enters the CQ
1212 * o SIW_CQ_NOTIFY_NEXT_COMP lets siw trigger a notification
1213 * event if a WQE enters the CQ.
1214 * o IB_CQ_REPORT_MISSED_EVENTS: return value will provide the
1215 * number of not reaped CQE's regardless of its notification
1216 * type and current or new CQ notification settings.
1217 *
1218 * @base_cq: Base CQ contained in siw CQ.
1219 * @flags: Requested notification flags.
1220 */
1221int siw_req_notify_cq(struct ib_cq *base_cq, enum ib_cq_notify_flags flags)
1222{
1223 struct siw_cq *cq = to_siw_cq(base_cq);
1224
1225 siw_dbg_cq(cq, "flags: 0x%02x\n", flags);
1226
1227 if ((flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED)
Bernard Metzler2c8ccb32019-08-09 17:18:16 +02001228 /*
1229 * Enable CQ event for next solicited completion.
1230 * and make it visible to all associated producers.
1231 */
1232 smp_store_mb(cq->notify->flags, SIW_NOTIFY_SOLICITED);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001233 else
Bernard Metzler2c8ccb32019-08-09 17:18:16 +02001234 /*
1235 * Enable CQ event for any signalled completion.
1236 * and make it visible to all associated producers.
1237 */
1238 smp_store_mb(cq->notify->flags, SIW_NOTIFY_ALL);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001239
1240 if (flags & IB_CQ_REPORT_MISSED_EVENTS)
1241 return cq->cq_put - cq->cq_get;
1242
1243 return 0;
1244}
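
/*
 * Illustrative caller pattern (assumption, standard verbs usage, not
 * part of this file): after re-arming with IB_CQ_REPORT_MISSED_EVENTS
 * set, a non-zero return value tells the consumer to poll the CQ once
 * more before sleeping, closing the race between poll and re-arm.
 */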
1245
1246/*
1247 * siw_dereg_mr()
1248 *
1249 * Release Memory Region.
1250 *
1251 * @base_mr: Base MR contained in siw MR.
1252 * @udata: points to user context, unused.
1253 */
1254int siw_dereg_mr(struct ib_mr *base_mr, struct ib_udata *udata)
1255{
1256 struct siw_mr *mr = to_siw_mr(base_mr);
1257 struct siw_device *sdev = to_siw_dev(base_mr->device);
1258
1259 siw_dbg_mem(mr->mem, "deregister MR\n");
1260
1261 atomic_dec(&sdev->num_mr);
1262
1263 siw_mr_drop_mem(mr);
1264 kfree_rcu(mr, rcu);
1265
1266 return 0;
1267}
1268
1269/*
1270 * siw_reg_user_mr()
1271 *
1272 * Register Memory Region.
1273 *
1274 * @pd: Protection Domain
1275 * @start: starting address of MR (virtual address)
1276 * @len: len of MR
1277 * @rnic_va: not used by siw
1278 * @rights: MR access rights
1279 * @udata: user buffer to communicate STag and Key.
1280 */
1281struct ib_mr *siw_reg_user_mr(struct ib_pd *pd, u64 start, u64 len,
1282 u64 rnic_va, int rights, struct ib_udata *udata)
1283{
1284 struct siw_mr *mr = NULL;
1285 struct siw_umem *umem = NULL;
1286 struct siw_ureq_reg_mr ureq;
1287 struct siw_device *sdev = to_siw_dev(pd->device);
1288
1289 unsigned long mem_limit = rlimit(RLIMIT_MEMLOCK);
1290 int rv;
1291
Bernard Metzlerc5362772019-08-22 19:37:38 +02001292 siw_dbg_pd(pd, "start: 0x%pK, va: 0x%pK, len: %llu\n",
1293 (void *)(uintptr_t)start, (void *)(uintptr_t)rnic_va,
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001294 (unsigned long long)len);
1295
1296 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1297 siw_dbg_pd(pd, "too many mr's\n");
1298 rv = -ENOMEM;
1299 goto err_out;
1300 }
1301 if (!len) {
1302 rv = -EINVAL;
1303 goto err_out;
1304 }
1305 if (mem_limit != RLIM_INFINITY) {
1306 unsigned long num_pages =
1307 (PAGE_ALIGN(len + (start & ~PAGE_MASK))) >> PAGE_SHIFT;
1308 mem_limit >>= PAGE_SHIFT;
1309
1310 if (num_pages > mem_limit - current->mm->locked_vm) {
1311 siw_dbg_pd(pd, "pages req %lu, max %lu, lock %lu\n",
1312 num_pages, mem_limit,
1313 current->mm->locked_vm);
1314 rv = -ENOMEM;
1315 goto err_out;
1316 }
1317 }
1318 umem = siw_umem_get(start, len, ib_access_writable(rights));
1319 if (IS_ERR(umem)) {
1320 rv = PTR_ERR(umem);
1321 siw_dbg_pd(pd, "getting user memory failed: %d\n", rv);
1322 umem = NULL;
1323 goto err_out;
1324 }
1325 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1326 if (!mr) {
1327 rv = -ENOMEM;
1328 goto err_out;
1329 }
1330 rv = siw_mr_add_mem(mr, pd, umem, start, len, rights);
1331 if (rv)
1332 goto err_out;
1333
1334 if (udata) {
1335 struct siw_uresp_reg_mr uresp = {};
1336 struct siw_mem *mem = mr->mem;
1337
1338 if (udata->inlen < sizeof(ureq)) {
1339 rv = -EINVAL;
1340 goto err_out;
1341 }
1342 rv = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
1343 if (rv)
1344 goto err_out;
1345
1346 mr->base_mr.lkey |= ureq.stag_key;
1347 mr->base_mr.rkey |= ureq.stag_key;
1348 mem->stag |= ureq.stag_key;
1349 uresp.stag = mem->stag;
1350
1351 if (udata->outlen < sizeof(uresp)) {
1352 rv = -EINVAL;
1353 goto err_out;
1354 }
1355 rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1356 if (rv)
1357 goto err_out;
1358 }
1359 mr->mem->stag_valid = 1;
1360
1361 return &mr->base_mr;
1362
1363err_out:
1364 atomic_dec(&sdev->num_mr);
1365 if (mr) {
1366 if (mr->mem)
1367 siw_mr_drop_mem(mr);
1368 kfree_rcu(mr, rcu);
1369 } else {
1370 if (umem)
1371 siw_umem_release(umem, false);
1372 }
1373 return ERR_PTR(rv);
1374}
1375
1376struct ib_mr *siw_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
Gal Pressman42a3b152020-07-06 15:03:43 +03001377 u32 max_sge)
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001378{
1379 struct siw_device *sdev = to_siw_dev(pd->device);
1380 struct siw_mr *mr = NULL;
1381 struct siw_pbl *pbl = NULL;
1382 int rv;
1383
1384 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1385 siw_dbg_pd(pd, "too many mr's\n");
1386 rv = -ENOMEM;
1387 goto err_out;
1388 }
1389 if (mr_type != IB_MR_TYPE_MEM_REG) {
1390 siw_dbg_pd(pd, "mr type %d unsupported\n", mr_type);
1391 rv = -EOPNOTSUPP;
1392 goto err_out;
1393 }
1394 if (max_sge > SIW_MAX_SGE_PBL) {
1395 siw_dbg_pd(pd, "too many sge's: %d\n", max_sge);
1396 rv = -ENOMEM;
1397 goto err_out;
1398 }
1399 pbl = siw_pbl_alloc(max_sge);
1400 if (IS_ERR(pbl)) {
1401 rv = PTR_ERR(pbl);
1402 siw_dbg_pd(pd, "pbl allocation failed: %d\n", rv);
1403 pbl = NULL;
1404 goto err_out;
1405 }
1406 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1407 if (!mr) {
1408 rv = -ENOMEM;
1409 goto err_out;
1410 }
1411 rv = siw_mr_add_mem(mr, pd, pbl, 0, max_sge * PAGE_SIZE, 0);
1412 if (rv)
1413 goto err_out;
1414
1415 mr->mem->is_pbl = 1;
1416
1417 siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
1418
1419 return &mr->base_mr;
1420
1421err_out:
1422 atomic_dec(&sdev->num_mr);
1423
1424 if (!mr) {
1425 kfree(pbl);
1426 } else {
1427 if (mr->mem)
1428 siw_mr_drop_mem(mr);
1429 kfree_rcu(mr, rcu);
1430 }
1431 siw_dbg_pd(pd, "failed: %d\n", rv);
1432
1433 return ERR_PTR(rv);
1434}
1435
1436/* Just used to count number of pages being mapped */
1437static int siw_set_pbl_page(struct ib_mr *base_mr, u64 buf_addr)
1438{
1439 return 0;
1440}
1441
1442int siw_map_mr_sg(struct ib_mr *base_mr, struct scatterlist *sl, int num_sle,
1443 unsigned int *sg_off)
1444{
1445 struct scatterlist *slp;
1446 struct siw_mr *mr = to_siw_mr(base_mr);
1447 struct siw_mem *mem = mr->mem;
1448 struct siw_pbl *pbl = mem->pbl;
1449 struct siw_pble *pble;
Bernard Metzlerc5362772019-08-22 19:37:38 +02001450 unsigned long pbl_size;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001451 int i, rv;
1452
1453 if (!pbl) {
1454 siw_dbg_mem(mem, "no PBL allocated\n");
1455 return -EINVAL;
1456 }
1457 pble = pbl->pbe;
1458
1459 if (pbl->max_buf < num_sle) {
1460 siw_dbg_mem(mem, "too many SGE's: %d > %d\n",
1461 mem->pbl->max_buf, num_sle);
1462 return -ENOMEM;
1463 }
1464 for_each_sg(sl, slp, num_sle, i) {
1465 if (sg_dma_len(slp) == 0) {
1466 siw_dbg_mem(mem, "empty SGE\n");
1467 return -EINVAL;
1468 }
1469 if (i == 0) {
1470 pble->addr = sg_dma_address(slp);
1471 pble->size = sg_dma_len(slp);
1472 pble->pbl_off = 0;
1473 pbl_size = pble->size;
1474 pbl->num_buf = 1;
1475 } else {
1476 /* Merge PBL entries if adjacent */
1477 if (pble->addr + pble->size == sg_dma_address(slp)) {
1478 pble->size += sg_dma_len(slp);
1479 } else {
1480 pble++;
1481 pbl->num_buf++;
1482 pble->addr = sg_dma_address(slp);
1483 pble->size = sg_dma_len(slp);
1484 pble->pbl_off = pbl_size;
1485 }
1486 pbl_size += sg_dma_len(slp);
1487 }
1488 siw_dbg_mem(mem,
Bernard Metzlerc5362772019-08-22 19:37:38 +02001489 "sge[%d], size %u, addr 0x%p, total %lu\n",
1490 i, pble->size, (void *)(uintptr_t)pble->addr,
1491 pbl_size);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001492 }
1493 rv = ib_sg_to_pages(base_mr, sl, num_sle, sg_off, siw_set_pbl_page);
1494 if (rv > 0) {
1495 mem->len = base_mr->length;
1496 mem->va = base_mr->iova;
1497 siw_dbg_mem(mem,
Bernard Metzlerc5362772019-08-22 19:37:38 +02001498 "%llu bytes, start 0x%pK, %u SLE to %u entries\n",
1499 mem->len, (void *)(uintptr_t)mem->va, num_sle,
1500 pbl->num_buf);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001501 }
1502 return rv;
1503}
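
/*
 * Illustrative example for the PBL merge in siw_map_mr_sg() above
 * (assumption, values made up): two 4096-byte DMA segments at bus
 * addresses 0x1000 and 0x2000 are adjacent and collapse into a single
 * pble of 8192 bytes at 0x1000, while a segment at 0x4000 starts a new
 * pble with pbl_off carrying the accumulated byte offset.
 */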
1504
/*
 * siw_get_dma_mr()
 *
 * Create an (empty) DMA memory region, where no umem is attached.
 */
1510struct ib_mr *siw_get_dma_mr(struct ib_pd *pd, int rights)
1511{
1512 struct siw_device *sdev = to_siw_dev(pd->device);
1513 struct siw_mr *mr = NULL;
1514 int rv;
1515
1516 if (atomic_inc_return(&sdev->num_mr) > SIW_MAX_MR) {
1517 siw_dbg_pd(pd, "too many mr's\n");
1518 rv = -ENOMEM;
1519 goto err_out;
1520 }
1521 mr = kzalloc(sizeof(*mr), GFP_KERNEL);
1522 if (!mr) {
1523 rv = -ENOMEM;
1524 goto err_out;
1525 }
1526 rv = siw_mr_add_mem(mr, pd, NULL, 0, ULONG_MAX, rights);
1527 if (rv)
1528 goto err_out;
1529
1530 mr->mem->stag_valid = 1;
1531
1532 siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);
1533
1534 return &mr->base_mr;
1535
1536err_out:
1537 if (rv)
1538 kfree(mr);
1539
1540 atomic_dec(&sdev->num_mr);
1541
1542 return ERR_PTR(rv);
1543}
1544
1545/*
1546 * siw_create_srq()
1547 *
1548 * Create Shared Receive Queue of attributes @init_attrs
1549 * within protection domain given by @pd.
1550 *
1551 * @base_srq: Base SRQ contained in siw SRQ.
1552 * @init_attrs: SRQ init attributes.
1553 * @udata: points to user context
1554 */
1555int siw_create_srq(struct ib_srq *base_srq,
1556 struct ib_srq_init_attr *init_attrs, struct ib_udata *udata)
1557{
1558 struct siw_srq *srq = to_siw_srq(base_srq);
1559 struct ib_srq_attr *attrs = &init_attrs->attr;
1560 struct siw_device *sdev = to_siw_dev(base_srq->device);
1561 struct siw_ucontext *ctx =
1562 rdma_udata_to_drv_context(udata, struct siw_ucontext,
1563 base_ucontext);
1564 int rv;
1565
Jason Gunthorpe652caba2020-10-03 20:20:05 -03001566 if (init_attrs->srq_type != IB_SRQT_BASIC)
1567 return -EOPNOTSUPP;
1568
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001569 if (atomic_inc_return(&sdev->num_srq) > SIW_MAX_SRQ) {
1570 siw_dbg_pd(base_srq->pd, "too many SRQ's\n");
1571 rv = -ENOMEM;
1572 goto err_out;
1573 }
1574 if (attrs->max_wr == 0 || attrs->max_wr > SIW_MAX_SRQ_WR ||
1575 attrs->max_sge > SIW_MAX_SGE || attrs->srq_limit > attrs->max_wr) {
1576 rv = -EINVAL;
1577 goto err_out;
1578 }
1579 srq->max_sge = attrs->max_sge;
1580 srq->num_rqe = roundup_pow_of_two(attrs->max_wr);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001581 srq->limit = attrs->srq_limit;
1582 if (srq->limit)
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001583 srq->armed = true;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001584
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001585 srq->is_kernel_res = !udata;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001586
1587 if (udata)
1588 srq->recvq =
1589 vmalloc_user(srq->num_rqe * sizeof(struct siw_rqe));
1590 else
1591 srq->recvq = vzalloc(srq->num_rqe * sizeof(struct siw_rqe));
1592
1593 if (srq->recvq == NULL) {
1594 rv = -ENOMEM;
1595 goto err_out;
1596 }
1597 if (udata) {
1598 struct siw_uresp_create_srq uresp = {};
Michal Kalderon11f1a752019-10-30 11:44:14 +02001599 size_t length = srq->num_rqe * sizeof(struct siw_rqe);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001600
Michal Kalderon11f1a752019-10-30 11:44:14 +02001601 srq->srq_entry =
1602 siw_mmap_entry_insert(ctx, srq->recvq,
1603 length, &uresp.srq_key);
1604 if (!srq->srq_entry) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001605 rv = -ENOMEM;
1606 goto err_out;
1607 }
Michal Kalderon11f1a752019-10-30 11:44:14 +02001608
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001609 uresp.num_rqe = srq->num_rqe;
1610
1611 if (udata->outlen < sizeof(uresp)) {
1612 rv = -EINVAL;
1613 goto err_out;
1614 }
1615 rv = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1616 if (rv)
1617 goto err_out;
1618 }
1619 spin_lock_init(&srq->lock);
1620
Bernard Metzlerc5362772019-08-22 19:37:38 +02001621 siw_dbg_pd(base_srq->pd, "[SRQ]: success\n");
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001622
1623 return 0;
1624
1625err_out:
1626 if (srq->recvq) {
Michal Kalderon11f1a752019-10-30 11:44:14 +02001627 if (ctx)
1628 rdma_user_mmap_entry_remove(srq->srq_entry);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001629 vfree(srq->recvq);
1630 }
1631 atomic_dec(&sdev->num_srq);
1632
1633 return rv;
1634}
1635
1636/*
1637 * siw_modify_srq()
1638 *
1639 * Modify SRQ. The caller may resize SRQ and/or set/reset notification
1640 * limit and (re)arm IB_EVENT_SRQ_LIMIT_REACHED notification.
1641 *
1642 * NOTE: it is unclear if RDMA core allows for changing the MAX_SGE
1643 * parameter. siw_modify_srq() does not check the attrs->max_sge param.
1644 */
1645int siw_modify_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs,
1646 enum ib_srq_attr_mask attr_mask, struct ib_udata *udata)
1647{
1648 struct siw_srq *srq = to_siw_srq(base_srq);
1649 unsigned long flags;
1650 int rv = 0;
1651
1652 spin_lock_irqsave(&srq->lock, flags);
1653
1654 if (attr_mask & IB_SRQ_MAX_WR) {
1655 /* resize request not yet supported */
1656 rv = -EOPNOTSUPP;
1657 goto out;
1658 }
1659 if (attr_mask & IB_SRQ_LIMIT) {
1660 if (attrs->srq_limit) {
1661 if (unlikely(attrs->srq_limit > srq->num_rqe)) {
1662 rv = -EINVAL;
1663 goto out;
1664 }
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001665 srq->armed = true;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001666 } else {
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001667 srq->armed = false;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001668 }
1669 srq->limit = attrs->srq_limit;
1670 }
1671out:
1672 spin_unlock_irqrestore(&srq->lock, flags);
1673
1674 return rv;
1675}
1676
1677/*
1678 * siw_query_srq()
1679 *
1680 * Query SRQ attributes.
1681 */
1682int siw_query_srq(struct ib_srq *base_srq, struct ib_srq_attr *attrs)
1683{
1684 struct siw_srq *srq = to_siw_srq(base_srq);
1685 unsigned long flags;
1686
1687 spin_lock_irqsave(&srq->lock, flags);
1688
1689 attrs->max_wr = srq->num_rqe;
1690 attrs->max_sge = srq->max_sge;
1691 attrs->srq_limit = srq->limit;
1692
1693 spin_unlock_irqrestore(&srq->lock, flags);
1694
1695 return 0;
1696}
1697
1698/*
1699 * siw_destroy_srq()
1700 *
1701 * Destroy SRQ.
1702 * It is assumed that the SRQ is not referenced by any
1703 * QP anymore - the code trusts the RDMA core environment to keep track
1704 * of QP references.
1705 */
Leon Romanovsky119181d2020-09-07 15:09:16 +03001706int siw_destroy_srq(struct ib_srq *base_srq, struct ib_udata *udata)
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001707{
1708 struct siw_srq *srq = to_siw_srq(base_srq);
1709 struct siw_device *sdev = to_siw_dev(base_srq->device);
1710 struct siw_ucontext *ctx =
1711 rdma_udata_to_drv_context(udata, struct siw_ucontext,
1712 base_ucontext);
1713
Michal Kalderon11f1a752019-10-30 11:44:14 +02001714 if (ctx)
1715 rdma_user_mmap_entry_remove(srq->srq_entry);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001716 vfree(srq->recvq);
1717 atomic_dec(&sdev->num_srq);
Leon Romanovsky119181d2020-09-07 15:09:16 +03001718 return 0;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001719}
1720
1721/*
1722 * siw_post_srq_recv()
1723 *
1724 * Post a list of receive queue elements to SRQ.
1725 * NOTE: The function does not check or lock a certain SRQ state
1726 * during the post operation. The code simply trusts the
1727 * RDMA core environment.
1728 *
1729 * @base_srq: Base SRQ contained in siw SRQ
1730 * @wr: List of R-WR's
1731 * @bad_wr: Updated to failing WR if posting fails.
1732 */
1733int siw_post_srq_recv(struct ib_srq *base_srq, const struct ib_recv_wr *wr,
1734 const struct ib_recv_wr **bad_wr)
1735{
1736 struct siw_srq *srq = to_siw_srq(base_srq);
1737 unsigned long flags;
1738 int rv = 0;
1739
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001740 if (unlikely(!srq->is_kernel_res)) {
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001741 siw_dbg_pd(base_srq->pd,
Bernard Metzlerc5362772019-08-22 19:37:38 +02001742 "[SRQ]: no kernel post_recv for mapped srq\n");
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001743 rv = -EINVAL;
1744 goto out;
1745 }
1746 /*
1747 * Serialize potentially multiple producers.
1748 * Also needed to serialize potentially multiple
1749 * consumers.
1750 */
1751 spin_lock_irqsave(&srq->lock, flags);
1752
1753 while (wr) {
1754 u32 idx = srq->rq_put % srq->num_rqe;
1755 struct siw_rqe *rqe = &srq->recvq[idx];
1756
1757 if (rqe->flags) {
1758 siw_dbg_pd(base_srq->pd, "SRQ full\n");
1759 rv = -ENOMEM;
1760 break;
1761 }
1762 if (unlikely(wr->num_sge > srq->max_sge)) {
1763 siw_dbg_pd(base_srq->pd,
Bernard Metzlerc5362772019-08-22 19:37:38 +02001764 "[SRQ]: too many sge's: %d\n", wr->num_sge);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001765 rv = -EINVAL;
1766 break;
1767 }
1768 rqe->id = wr->wr_id;
1769 rqe->num_sge = wr->num_sge;
1770 siw_copy_sgl(wr->sg_list, rqe->sge, wr->num_sge);
1771
1772 /* Make sure S-RQE is completely written before valid */
1773 smp_wmb();
1774
1775 rqe->flags = SIW_WQE_VALID;
1776
1777 srq->rq_put++;
1778 wr = wr->next;
1779 }
1780 spin_unlock_irqrestore(&srq->lock, flags);
1781out:
1782 if (unlikely(rv < 0)) {
Bernard Metzlerc5362772019-08-22 19:37:38 +02001783 siw_dbg_pd(base_srq->pd, "[SRQ]: error %d\n", rv);
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001784 *bad_wr = wr;
1785 }
1786 return rv;
1787}
1788
1789void siw_qp_event(struct siw_qp *qp, enum ib_event_type etype)
1790{
1791 struct ib_event event;
Bernard Metzler58fb0b52019-12-10 17:17:29 +01001792 struct ib_qp *base_qp = &qp->base_qp;
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001793
1794 /*
1795 * Do not report asynchronous errors on QP which gets
1796 * destroyed via verbs interface (siw_destroy_qp())
1797 */
1798 if (qp->attrs.flags & SIW_QP_IN_DESTROY)
1799 return;
1800
1801 event.event = etype;
1802 event.device = base_qp->device;
1803 event.element.qp = base_qp;
1804
1805 if (base_qp->event_handler) {
1806 siw_dbg_qp(qp, "reporting event %d\n", etype);
1807 base_qp->event_handler(&event, base_qp->qp_context);
1808 }
1809}
1810
1811void siw_cq_event(struct siw_cq *cq, enum ib_event_type etype)
1812{
1813 struct ib_event event;
1814 struct ib_cq *base_cq = &cq->base_cq;
1815
1816 event.event = etype;
1817 event.device = base_cq->device;
1818 event.element.cq = base_cq;
1819
1820 if (base_cq->event_handler) {
1821 siw_dbg_cq(cq, "reporting CQ event %d\n", etype);
1822 base_cq->event_handler(&event, base_cq->cq_context);
1823 }
1824}
1825
1826void siw_srq_event(struct siw_srq *srq, enum ib_event_type etype)
1827{
1828 struct ib_event event;
1829 struct ib_srq *base_srq = &srq->base_srq;
1830
1831 event.event = etype;
1832 event.device = base_srq->device;
1833 event.element.srq = base_srq;
1834
1835 if (base_srq->event_handler) {
1836 siw_dbg_pd(srq->base_srq.pd,
1837 "reporting SRQ event %d\n", etype);
1838 base_srq->event_handler(&event, base_srq->srq_context);
1839 }
1840}
1841
Mark Bloch1fb7f892021-03-01 09:04:20 +02001842void siw_port_event(struct siw_device *sdev, u32 port, enum ib_event_type etype)
Bernard Metzler303ae1c2019-06-20 18:21:27 +02001843{
1844 struct ib_event event;
1845
1846 event.event = etype;
1847 event.device = &sdev->base_dev;
1848 event.element.port_num = port;
1849
1850 siw_dbg(&sdev->base_dev, "reporting port event %d\n", etype);
1851
1852 ib_dispatch_event(&event);
1853}