/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 * www.emulex.com
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/dma-mapping.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/iw_cm.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>

#include "ocrdma.h"
#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include <rdma/ocrdma-abi.h>

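/* RoCE exposes a single read-only P_Key table entry: only index 0 is
 * valid, and it always reads back as the default partition key 0xffff.
 */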
int ocrdma_query_pkey(struct ib_device *ibdev, u8 port, u16 index, u16 *pkey)
{
	if (index > 0)
		return -EINVAL;

	*pkey = 0xffff;
	return 0;
}

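/* Report device limits to the IB core. All values come from the
 * attributes cached in dev->attr at probe time; no vendor-specific
 * input/output (uhw) is accepted.
 */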
int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr,
			struct ib_udata *uhw)
{
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(attr, 0, sizeof(*attr));
	memcpy(&attr->fw_ver, &dev->attr.fw_ver[0],
	       min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver)));
	ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid);
	attr->max_mr_size = dev->attr.max_mr_size;
	attr->page_size_cap = 0xffff000;
	attr->vendor_id = dev->nic_info.pdev->vendor;
	attr->vendor_part_id = dev->nic_info.pdev->device;
	attr->hw_ver = dev->asic_id;
	attr->max_qp = dev->attr.max_qp;
	attr->max_ah = OCRDMA_MAX_AH;
	attr->max_qp_wr = dev->attr.max_wqe;

	attr->device_cap_flags = IB_DEVICE_CURR_QP_STATE_MOD |
				 IB_DEVICE_RC_RNR_NAK_GEN |
				 IB_DEVICE_SHUTDOWN_PORT |
				 IB_DEVICE_SYS_IMAGE_GUID |
				 IB_DEVICE_LOCAL_DMA_LKEY |
				 IB_DEVICE_MEM_MGT_EXTENSIONS;
	attr->max_send_sge = dev->attr.max_send_sge;
	attr->max_recv_sge = dev->attr.max_recv_sge;
	attr->max_sge_rd = dev->attr.max_rdma_sge;
	attr->max_cq = dev->attr.max_cq;
	attr->max_cqe = dev->attr.max_cqe;
	attr->max_mr = dev->attr.max_mr;
	attr->max_mw = dev->attr.max_mw;
	attr->max_pd = dev->attr.max_pd;
	attr->atomic_cap = 0;
	attr->max_fmr = 0;
	attr->max_map_per_fmr = 0;
	attr->max_qp_rd_atom =
	    min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
	attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
	attr->max_srq = dev->attr.max_srq;
	attr->max_srq_sge = dev->attr.max_srq_sge;
	attr->max_srq_wr = dev->attr.max_rqe;
	attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
	attr->max_fast_reg_page_list_len = dev->attr.max_pages_per_frmr;
	attr->max_pkeys = 1;
	return 0;
}

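/* Return the netdev backing this RoCE device, taking a reference for
 * the caller; RCU guards against the netdev disappearing while the
 * pointer is being read.
 */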
struct net_device *ocrdma_get_netdev(struct ib_device *ibdev, u8 port_num)
{
	struct ocrdma_dev *dev;
	struct net_device *ndev = NULL;

	rcu_read_lock();

	dev = get_ocrdma_dev(ibdev);
	if (dev)
		ndev = dev->nic_info.netdev;
	if (ndev)
		dev_hold(ndev);

	rcu_read_unlock();

	return ndev;
}

static inline void get_link_speed_and_width(struct ocrdma_dev *dev,
					    u8 *ib_speed, u8 *ib_width)
{
	int status;
	u8 speed;

	status = ocrdma_mbx_get_link_speed(dev, &speed, NULL);
	if (status)
		speed = OCRDMA_PHYS_LINK_SPEED_ZERO;

	switch (speed) {
	case OCRDMA_PHYS_LINK_SPEED_1GBPS:
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_10GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_1X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_20GBPS:
		*ib_speed = IB_SPEED_DDR;
		*ib_width = IB_WIDTH_4X;
		break;

	case OCRDMA_PHYS_LINK_SPEED_40GBPS:
		*ib_speed = IB_SPEED_QDR;
		*ib_width = IB_WIDTH_4X;
		break;

	default:
		/* Unsupported */
		*ib_speed = IB_SPEED_SDR;
		*ib_width = IB_WIDTH_1X;
	}
}

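/* Port attributes are synthesized from the underlying Ethernet link;
 * IB-specific fields such as LID, LMC and SM LID have no meaning on
 * RoCE and are reported as zero.
 */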
int ocrdma_query_port(struct ib_device *ibdev,
		      u8 port, struct ib_port_attr *props)
{
	enum ib_port_state port_state;
	struct ocrdma_dev *dev;
	struct net_device *netdev;

	/* props being zeroed by the caller, avoid zeroing it here */
	dev = get_ocrdma_dev(ibdev);
	netdev = dev->nic_info.netdev;
	if (netif_running(netdev) && netif_oper_up(netdev)) {
		port_state = IB_PORT_ACTIVE;
		props->phys_state = 5;	/* IB physical state: LinkUp */
	} else {
		port_state = IB_PORT_DOWN;
		props->phys_state = 3;	/* IB physical state: Disabled */
	}
	props->max_mtu = IB_MTU_4096;
	props->active_mtu = iboe_get_mtu(netdev->mtu);
	props->lid = 0;
	props->lmc = 0;
	props->sm_lid = 0;
	props->sm_sl = 0;
	props->state = port_state;
	props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
				IB_PORT_DEVICE_MGMT_SUP |
				IB_PORT_VENDOR_CLASS_SUP;
	props->ip_gids = true;
	props->gid_tbl_len = OCRDMA_MAX_SGID;
	props->pkey_tbl_len = 1;
	props->bad_pkey_cntr = 0;
	props->qkey_viol_cntr = 0;
	get_link_speed_and_width(dev, &props->active_speed,
				 &props->active_width);
	props->max_msg_sz = 0x80000000;
	props->max_vl_num = 4;
	return 0;
}

int ocrdma_modify_port(struct ib_device *ibdev, u8 port, int mask,
		       struct ib_port_modify *props)
{
	return 0;
}

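/* The three helpers below track (phys_addr, len) keys for the doorbell
 * and DPP regions handed out to user space, so that ocrdma_mmap() can
 * verify that a requested mapping was actually granted to this
 * ucontext before remapping device memory.
 */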
static int ocrdma_add_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			   unsigned long len)
{
	struct ocrdma_mm *mm;

	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (mm == NULL)
		return -ENOMEM;
	mm->key.phy_addr = phy_addr;
	mm->key.len = len;
	INIT_LIST_HEAD(&mm->entry);

	mutex_lock(&uctx->mm_list_lock);
	list_add_tail(&mm->entry, &uctx->mm_head);
	mutex_unlock(&uctx->mm_list_lock);
	return 0;
}

static void ocrdma_del_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			    unsigned long len)
{
	struct ocrdma_mm *mm, *tmp;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		/* skip entries unless both the address and length match */
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		list_del(&mm->entry);
		kfree(mm);
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
}

static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr,
			       unsigned long len)
{
	bool found = false;
	struct ocrdma_mm *mm;

	mutex_lock(&uctx->mm_list_lock);
	list_for_each_entry(mm, &uctx->mm_head, entry) {
		/* a key matches only when both address and length match */
		if (len != mm->key.len || phy_addr != mm->key.phy_addr)
			continue;

		found = true;
		break;
	}
	mutex_unlock(&uctx->mm_list_lock);
	return found;
}

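/* Pre-allocated PD pool management. When firmware hands the driver a
 * fixed range of PD ids up front (pd_prealloc_valid), allocation is a
 * find-first-zero-bit scan over the DPP or normal pool bitmap; the
 * *_thrsh fields track the allocation high-water mark.
 */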
static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool)
{
	u16 pd_bitmap_idx = 0;
	const unsigned long *pd_bitmap;

	if (dpp_pool) {
		pd_bitmap = dev->pd_mgr->pd_dpp_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_dpp_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap);
		dev->pd_mgr->pd_dpp_count++;
		if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh)
			dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count;
	} else {
		pd_bitmap = dev->pd_mgr->pd_norm_bitmap;
		pd_bitmap_idx = find_first_zero_bit(pd_bitmap,
						    dev->pd_mgr->max_normal_pd);
		__set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap);
		dev->pd_mgr->pd_norm_count++;
		if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh)
			dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count;
	}
	return pd_bitmap_idx;
}

static int _ocrdma_pd_mgr_put_bitmap(struct ocrdma_dev *dev, u16 pd_id,
				     bool dpp_pool)
{
	u16 pd_count;
	u16 pd_bit_index;

	pd_count = dpp_pool ? dev->pd_mgr->pd_dpp_count :
			      dev->pd_mgr->pd_norm_count;
	if (pd_count == 0)
		return -EINVAL;

	if (dpp_pool) {
		pd_bit_index = pd_id - dev->pd_mgr->pd_dpp_start;
		if (pd_bit_index >= dev->pd_mgr->max_dpp_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_dpp_bitmap);
			dev->pd_mgr->pd_dpp_count--;
		}
	} else {
		pd_bit_index = pd_id - dev->pd_mgr->pd_norm_start;
		if (pd_bit_index >= dev->pd_mgr->max_normal_pd) {
			return -EINVAL;
		} else {
			__clear_bit(pd_bit_index, dev->pd_mgr->pd_norm_bitmap);
			dev->pd_mgr->pd_norm_count--;
		}
	}

	return 0;
}

static int ocrdma_put_pd_num(struct ocrdma_dev *dev, u16 pd_id,
			     bool dpp_pool)
{
	int status;

	mutex_lock(&dev->dev_lock);
	status = _ocrdma_pd_mgr_put_bitmap(dev, pd_id, dpp_pool);
	mutex_unlock(&dev->dev_lock);
	return status;
}

static int ocrdma_get_pd_num(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
{
	u16 pd_idx = 0;
	int status = 0;

	mutex_lock(&dev->dev_lock);
	if (pd->dpp_enabled) {
		/* try allocating DPP PD, if not available then normal PD */
		if (dev->pd_mgr->pd_dpp_count < dev->pd_mgr->max_dpp_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, true);
			pd->id = dev->pd_mgr->pd_dpp_start + pd_idx;
			pd->dpp_page = dev->pd_mgr->dpp_page_index + pd_idx;
		} else if (dev->pd_mgr->pd_norm_count <
			   dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
			pd->dpp_enabled = false;
		} else {
			status = -EINVAL;
		}
	} else {
		if (dev->pd_mgr->pd_norm_count < dev->pd_mgr->max_normal_pd) {
			pd_idx = _ocrdma_pd_mgr_get_bitmap(dev, false);
			pd->id = dev->pd_mgr->pd_norm_start + pd_idx;
		} else {
			status = -EINVAL;
		}
	}
	mutex_unlock(&dev->dev_lock);
	return status;
}

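/* Common PD allocation path. For user PDs on SKH-R (GEN2) hardware the
 * PD is made DPP-capable so WQEs can be pushed through a
 * write-combining doorbell window; if the mailbox allocation fails
 * with DPP requested, it is retried once as a plain PD.
 */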
static int _ocrdma_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
			    struct ocrdma_ucontext *uctx,
			    struct ib_udata *udata)
{
	int status;

	if (udata && uctx && dev->attr.max_dpp_pds) {
		pd->dpp_enabled =
			ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
		pd->num_dpp_qp =
			pd->dpp_enabled ? (dev->nic_info.db_page_size /
					   dev->attr.wqe_size) : 0;
	}

	if (dev->pd_mgr->pd_prealloc_valid)
		return ocrdma_get_pd_num(dev, pd);

retry:
	status = ocrdma_mbx_alloc_pd(dev, pd);
	if (status) {
		if (pd->dpp_enabled) {
			pd->dpp_enabled = false;
			pd->num_dpp_qp = 0;
			goto retry;
		}
		return status;
	}

	return 0;
}

static inline int is_ucontext_pd(struct ocrdma_ucontext *uctx,
				 struct ocrdma_pd *pd)
{
	return (uctx->cntxt_pd == pd);
}

static void _ocrdma_dealloc_pd(struct ocrdma_dev *dev,
			       struct ocrdma_pd *pd)
{
	if (dev->pd_mgr->pd_prealloc_valid)
		ocrdma_put_pd_num(dev, pd->id, pd->dpp_enabled);
	else
		ocrdma_mbx_dealloc_pd(dev, pd);
}

static int ocrdma_alloc_ucontext_pd(struct ocrdma_dev *dev,
				    struct ocrdma_ucontext *uctx,
				    struct ib_udata *udata)
{
	struct ib_device *ibdev = &dev->ibdev;
	struct ib_pd *pd;
	int status;

	pd = rdma_zalloc_drv_obj(ibdev, ib_pd);
	if (!pd)
		return -ENOMEM;

	pd->device = ibdev;
	uctx->cntxt_pd = get_ocrdma_pd(pd);

	status = _ocrdma_alloc_pd(dev, uctx->cntxt_pd, uctx, udata);
	if (status) {
		kfree(uctx->cntxt_pd);
		goto err;
	}

	uctx->cntxt_pd->uctx = uctx;
	uctx->cntxt_pd->ibpd.device = &dev->ibdev;
err:
	return status;
}

static void ocrdma_dealloc_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = uctx->cntxt_pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	if (uctx->pd_in_use) {
		pr_err("%s(%d) Freeing in use pdid=0x%x.\n",
		       __func__, dev->id, pd->id);
	}
	uctx->cntxt_pd = NULL;
	/* Release the HW PD before freeing the memory backing it;
	 * _ocrdma_dealloc_pd() still dereferences pd.
	 */
	_ocrdma_dealloc_pd(dev, pd);
	kfree(pd);
}

static struct ocrdma_pd *ocrdma_get_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	struct ocrdma_pd *pd = NULL;

	mutex_lock(&uctx->mm_list_lock);
	if (!uctx->pd_in_use) {
		uctx->pd_in_use = true;
		pd = uctx->cntxt_pd;
	}
	mutex_unlock(&uctx->mm_list_lock);

	return pd;
}

static void ocrdma_release_ucontext_pd(struct ocrdma_ucontext *uctx)
{
	mutex_lock(&uctx->mm_list_lock);
	uctx->pd_in_use = false;
	mutex_unlock(&uctx->mm_list_lock);
}

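/* Per-process context setup: allocate the AH table shared with user
 * space, publish it through the mmap key list, bind the dedicated
 * "context PD", and return queue-sizing parameters in the ucontext
 * response.
 */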
int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata)
{
	struct ib_device *ibdev = uctx->device;
	int status;
	struct ocrdma_ucontext *ctx = get_ocrdma_ucontext(uctx);
	struct ocrdma_alloc_ucontext_resp resp = {};
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 map_len = roundup(sizeof(u32) * 2048, PAGE_SIZE);

	if (!udata)
		return -EFAULT;
	INIT_LIST_HEAD(&ctx->mm_head);
	mutex_init(&ctx->mm_list_lock);

	ctx->ah_tbl.va = dma_alloc_coherent(&pdev->dev, map_len,
					    &ctx->ah_tbl.pa, GFP_KERNEL);
	if (!ctx->ah_tbl.va)
		return -ENOMEM;

	ctx->ah_tbl.len = map_len;

	resp.ah_tbl_len = ctx->ah_tbl.len;
	resp.ah_tbl_page = virt_to_phys(ctx->ah_tbl.va);

	status = ocrdma_add_mmap(ctx, resp.ah_tbl_page, resp.ah_tbl_len);
	if (status)
		goto map_err;

	status = ocrdma_alloc_ucontext_pd(dev, ctx, udata);
	if (status)
		goto pd_err;

	resp.dev_id = dev->id;
	resp.max_inline_data = dev->attr.max_inline_data;
	resp.wqe_size = dev->attr.wqe_size;
	resp.rqe_size = dev->attr.rqe_size;
	resp.dpp_wqe_size = dev->attr.wqe_size;

	memcpy(resp.fw_ver, dev->attr.fw_ver, sizeof(resp.fw_ver));
	status = ib_copy_to_udata(udata, &resp, sizeof(resp));
	if (status)
		goto cpy_err;
	return 0;

cpy_err:
	ocrdma_dealloc_ucontext_pd(ctx);
pd_err:
	ocrdma_del_mmap(ctx, ctx->ah_tbl.pa, ctx->ah_tbl.len);
map_err:
	dma_free_coherent(&pdev->dev, ctx->ah_tbl.len, ctx->ah_tbl.va,
			  ctx->ah_tbl.pa);
	return status;
}

void ocrdma_dealloc_ucontext(struct ib_ucontext *ibctx)
{
	struct ocrdma_mm *mm, *tmp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ibctx);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibctx->device);
	struct pci_dev *pdev = dev->nic_info.pdev;

	ocrdma_dealloc_ucontext_pd(uctx);

	ocrdma_del_mmap(uctx, uctx->ah_tbl.pa, uctx->ah_tbl.len);
	dma_free_coherent(&pdev->dev, uctx->ah_tbl.len, uctx->ah_tbl.va,
			  uctx->ah_tbl.pa);

	list_for_each_entry_safe(mm, tmp, &uctx->mm_head, entry) {
		list_del(&mm->entry);
		kfree(mm);
	}
}

int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
	struct ocrdma_ucontext *ucontext = get_ocrdma_ucontext(context);
	struct ocrdma_dev *dev = get_ocrdma_dev(context->device);
	unsigned long vm_page = vma->vm_pgoff << PAGE_SHIFT;
	u64 unmapped_db = (u64) dev->nic_info.unmapped_db;
	unsigned long len = (vma->vm_end - vma->vm_start);
	int status;
	bool found;

	if (vma->vm_start & (PAGE_SIZE - 1))
		return -EINVAL;
	found = ocrdma_search_mmap(ucontext, vma->vm_pgoff << PAGE_SHIFT, len);
	if (!found)
		return -EINVAL;

	if ((vm_page >= unmapped_db) && (vm_page <= (unmapped_db +
		dev->nic_info.db_total_size)) &&
		(len <= dev->nic_info.db_page_size)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else if (dev->nic_info.dpp_unmapped_len &&
		(vm_page >= (u64) dev->nic_info.dpp_unmapped_addr) &&
		(vm_page <= (u64) (dev->nic_info.dpp_unmapped_addr +
			dev->nic_info.dpp_unmapped_len)) &&
		(len <= dev->nic_info.dpp_unmapped_len)) {
		if (vma->vm_flags & VM_READ)
			return -EPERM;

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		status = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
					    len, vma->vm_page_prot);
	} else {
		status = remap_pfn_range(vma, vma->vm_start,
					 vma->vm_pgoff, len,
					 vma->vm_page_prot);
	}
	return status;
}

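/* Tell user space where the doorbell page (and the DPP page, when
 * enabled) for this PD lives. The addresses are registered in the mmap
 * key list first so that the library's subsequent mmap() calls pass
 * validation.
 */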
static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
				struct ib_ucontext *ib_ctx,
				struct ib_udata *udata)
{
	int status;
	u64 db_page_addr;
	u64 dpp_page_addr = 0;
	u32 db_page_size;
	struct ocrdma_alloc_pd_uresp rsp;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);

	memset(&rsp, 0, sizeof(rsp));
	rsp.id = pd->id;
	rsp.dpp_enabled = pd->dpp_enabled;
	db_page_addr = ocrdma_get_db_addr(dev, pd->id);
	db_page_size = dev->nic_info.db_page_size;

	status = ocrdma_add_mmap(uctx, db_page_addr, db_page_size);
	if (status)
		return status;

	if (pd->dpp_enabled) {
		dpp_page_addr = dev->nic_info.dpp_unmapped_addr +
				(pd->id * PAGE_SIZE);
		status = ocrdma_add_mmap(uctx, dpp_page_addr,
					 PAGE_SIZE);
		if (status)
			goto dpp_map_err;
		rsp.dpp_page_addr_hi = upper_32_bits(dpp_page_addr);
		rsp.dpp_page_addr_lo = dpp_page_addr;
	}

	status = ib_copy_to_udata(udata, &rsp, sizeof(rsp));
	if (status)
		goto ucopy_err;

	pd->uctx = uctx;
	return 0;

ucopy_err:
	/* pd->uctx is not yet set on these error paths; use the local uctx */
	if (pd->dpp_enabled)
		ocrdma_del_mmap(uctx, dpp_page_addr, PAGE_SIZE);
dpp_map_err:
	ocrdma_del_mmap(uctx, db_page_addr, db_page_size);
	return status;
}

int ocrdma_alloc_pd(struct ib_pd *ibpd, struct ib_ucontext *context,
		    struct ib_udata *udata)
{
	struct ib_device *ibdev = ibpd->device;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_pd *pd;
	struct ocrdma_ucontext *uctx = NULL;
	int status;
	u8 is_uctx_pd = false;

	if (udata && context) {
		uctx = get_ocrdma_ucontext(context);
		pd = ocrdma_get_ucontext_pd(uctx);
		if (pd) {
			is_uctx_pd = true;
			goto pd_mapping;
		}
	}

	pd = get_ocrdma_pd(ibpd);
	status = _ocrdma_alloc_pd(dev, pd, uctx, udata);
	if (status)
		goto exit;

pd_mapping:
	if (udata && context) {
		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
		if (status)
			goto err;
	}
	return 0;

err:
	if (is_uctx_pd)
		ocrdma_release_ucontext_pd(uctx);
	else
		_ocrdma_dealloc_pd(dev, pd);
exit:
	return status;
}

void ocrdma_dealloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_ucontext *uctx = NULL;
	u64 usr_db;

	uctx = pd->uctx;
	if (uctx) {
		u64 dpp_db = dev->nic_info.dpp_unmapped_addr +
			(pd->id * PAGE_SIZE);
		if (pd->dpp_enabled)
			ocrdma_del_mmap(pd->uctx, dpp_db, PAGE_SIZE);
		usr_db = ocrdma_get_db_addr(dev, pd->id);
		ocrdma_del_mmap(pd->uctx, usr_db, dev->nic_info.db_page_size);

		if (is_ucontext_pd(uctx, pd)) {
			ocrdma_release_ucontext_pd(uctx);
			return;
		}
	}
	_ocrdma_dealloc_pd(dev, pd);
}

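/* Translate IB access flags into the HW MR descriptor and request an
 * lkey from firmware; an rkey is reported only when remote access was
 * asked for.
 */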
static int ocrdma_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			     u32 pdid, int acc, u32 num_pbls, u32 addr_check)
{
	int status;

	mr->hwmr.fr_mr = 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.mw_bind = (acc & IB_ACCESS_MW_BIND) ? 1 : 0;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	mr->hwmr.num_pbls = num_pbls;

	status = ocrdma_mbx_alloc_lkey(dev, &mr->hwmr, pdid, addr_check);
	if (status)
		return status;

	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;
	return 0;
}

struct ib_mr *ocrdma_get_dma_mr(struct ib_pd *ibpd, int acc)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE)) {
		pr_err("%s err, invalid access rights\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	status = ocrdma_alloc_lkey(dev, mr, pd->id, acc, 0,
				   OCRDMA_ADDR_CHECK_DISABLE);
	if (status) {
		kfree(mr);
		return ERR_PTR(status);
	}

	return &mr->ibmr;
}

static void ocrdma_free_mr_pbl_tbl(struct ocrdma_dev *dev,
				   struct ocrdma_hw_mr *mr)
{
	struct pci_dev *pdev = dev->nic_info.pdev;
	int i = 0;

	if (mr->pbl_table) {
		for (i = 0; i < mr->num_pbls; i++) {
			if (!mr->pbl_table[i].va)
				continue;
			dma_free_coherent(&pdev->dev, mr->pbl_size,
					  mr->pbl_table[i].va,
					  mr->pbl_table[i].pa);
		}
		kfree(mr->pbl_table);
		mr->pbl_table = NULL;
	}
}

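/* Choose a PBL (page buffer list) geometry for num_pbes page entries.
 * PBL sizes are tried in powers of two from OCRDMA_MIN_HPAGE_SIZE
 * upward until the PBL count drops below the device limit; each 64-bit
 * PBL slot addresses one page of the MR.
 */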
static int ocrdma_get_pbl_info(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			       u32 num_pbes)
{
	u32 num_pbls = 0;
	u32 idx = 0;
	int status = 0;
	u32 pbl_size;

	do {
		pbl_size = OCRDMA_MIN_HPAGE_SIZE * (1 << idx);
		if (pbl_size > MAX_OCRDMA_PBL_SIZE) {
			status = -EFAULT;
			break;
		}
		num_pbls = roundup(num_pbes, (pbl_size / sizeof(u64)));
		num_pbls = num_pbls / (pbl_size / sizeof(u64));
		idx++;
	} while (num_pbls >= dev->attr.max_num_mr_pbl);

	mr->hwmr.num_pbes = num_pbes;
	mr->hwmr.num_pbls = num_pbls;
	mr->hwmr.pbl_size = pbl_size;
	return status;
}

static int ocrdma_build_pbl_tbl(struct ocrdma_dev *dev, struct ocrdma_hw_mr *mr)
{
	int status = 0;
	int i;
	u32 dma_len = mr->pbl_size;
	struct pci_dev *pdev = dev->nic_info.pdev;
	void *va;
	dma_addr_t pa;

	mr->pbl_table = kcalloc(mr->num_pbls, sizeof(struct ocrdma_pbl),
				GFP_KERNEL);

	if (!mr->pbl_table)
		return -ENOMEM;

	for (i = 0; i < mr->num_pbls; i++) {
		va = dma_alloc_coherent(&pdev->dev, dma_len, &pa, GFP_KERNEL);
		if (!va) {
			ocrdma_free_mr_pbl_tbl(dev, mr);
			status = -ENOMEM;
			break;
		}
		mr->pbl_table[i].va = va;
		mr->pbl_table[i].pa = pa;
	}
	return status;
}

static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
			    u32 num_pbes)
{
	struct ocrdma_pbe *pbe;
	struct sg_dma_page_iter sg_iter;
	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
	struct ib_umem *umem = mr->umem;
	int pbe_cnt, total_num_pbes = 0;
	u64 pg_addr;

	if (!mr->hwmr.num_pbes)
		return;

	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
	pbe_cnt = 0;

	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
		/* store the page address in pbe */
		pg_addr = sg_page_iter_dma_address(&sg_iter);
		pbe->pa_lo = cpu_to_le32(pg_addr);
		pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
		pbe_cnt += 1;
		total_num_pbes += 1;
		pbe++;

		/* if done building pbes, issue the mbx cmd. */
		if (total_num_pbes == num_pbes)
			return;

		/* if the given pbl is full storing the pbes,
		 * move to next pbl.
		 */
		if (pbe_cnt == (mr->hwmr.pbl_size / sizeof(u64))) {
			pbl_tbl++;
			pbe = (struct ocrdma_pbe *)pbl_tbl->va;
			pbe_cnt = 0;
		}
	}
}

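/* Register a user MR: pin the pages via ib_umem_get(), size and fill
 * the PBL tables from the umem's DMA addresses, then register the
 * region with firmware under the caller's PD.
 */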
struct ib_mr *ocrdma_reg_user_mr(struct ib_pd *ibpd, u64 start, u64 len,
				 u64 usr_addr, int acc, struct ib_udata *udata)
{
	int status = -ENOMEM;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd;
	u32 num_pbes;

	pd = get_ocrdma_pd(ibpd);

	if (acc & IB_ACCESS_REMOTE_WRITE && !(acc & IB_ACCESS_LOCAL_WRITE))
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(status);
	mr->umem = ib_umem_get(udata, start, len, acc, 0);
	if (IS_ERR(mr->umem)) {
		status = -EFAULT;
		goto umem_err;
	}
	num_pbes = ib_umem_page_count(mr->umem);
	status = ocrdma_get_pbl_info(dev, mr, num_pbes);
	if (status)
		goto umem_err;

	mr->hwmr.pbe_size = PAGE_SIZE;
	mr->hwmr.fbo = ib_umem_offset(mr->umem);
	mr->hwmr.va = usr_addr;
	mr->hwmr.len = len;
	mr->hwmr.remote_wr = (acc & IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
	mr->hwmr.remote_rd = (acc & IB_ACCESS_REMOTE_READ) ? 1 : 0;
	mr->hwmr.local_wr = (acc & IB_ACCESS_LOCAL_WRITE) ? 1 : 0;
	mr->hwmr.local_rd = 1;
	mr->hwmr.remote_atomic = (acc & IB_ACCESS_REMOTE_ATOMIC) ? 1 : 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto umem_err;
	build_user_pbes(dev, mr, num_pbes);
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, acc);
	if (status)
		goto mbx_err;
	mr->ibmr.lkey = mr->hwmr.lkey;
	if (mr->hwmr.remote_wr || mr->hwmr.remote_rd)
		mr->ibmr.rkey = mr->hwmr.lkey;

	return &mr->ibmr;

mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
umem_err:
	kfree(mr);
	return ERR_PTR(status);
}

int ocrdma_dereg_mr(struct ib_mr *ib_mr, struct ib_udata *udata)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ib_mr);
	struct ocrdma_dev *dev = get_ocrdma_dev(ib_mr->device);

	(void) ocrdma_mbx_dealloc_lkey(dev, mr->hwmr.fr_mr, mr->hwmr.lkey);

	kfree(mr->pages);
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);

	/* it could be user registered memory. */
	if (mr->umem)
		ib_umem_release(mr->umem);
	kfree(mr);

	/* Don't stop cleanup, in case FW is unresponsive */
	if (dev->mqe_ctx.fw_error_state) {
		pr_err("%s(%d) fw not responding.\n",
		       __func__, dev->id);
	}
	return 0;
}

static int ocrdma_copy_cq_uresp(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
				struct ib_udata *udata,
				struct ib_ucontext *ib_ctx)
{
	int status;
	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
	struct ocrdma_create_cq_uresp uresp;

	memset(&uresp, 0, sizeof(uresp));
	uresp.cq_id = cq->id;
	uresp.page_size = PAGE_ALIGN(cq->len);
	uresp.num_pages = 1;
	uresp.max_hw_cqe = cq->max_hw_cqe;
	uresp.page_addr[0] = virt_to_phys(cq->va);
	uresp.db_page_addr = ocrdma_get_db_addr(dev, uctx->cntxt_pd->id);
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.phase_change = cq->phase_change ? 1 : 0;
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) copy error cqid=0x%x.\n",
		       __func__, dev->id, cq->id);
		goto err;
	}
	status = ocrdma_add_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
	if (status)
		goto err;
	status = ocrdma_add_mmap(uctx, uresp.page_addr[0], uresp.page_size);
	if (status) {
		ocrdma_del_mmap(uctx, uresp.db_page_addr, uresp.db_page_size);
		goto err;
	}
	cq->ucontext = uctx;
err:
	return status;
}

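/* CQ creation. For user CQs the queue memory and the doorbell page are
 * exposed through the uresp so the library can poll and arm the CQ
 * without a kernel transition; dpp_cq is a library-requested variant
 * used together with DPP QPs.
 */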
struct ib_cq *ocrdma_create_cq(struct ib_device *ibdev,
			       const struct ib_cq_init_attr *attr,
			       struct ib_ucontext *ib_ctx,
			       struct ib_udata *udata)
{
	int entries = attr->cqe;
	struct ocrdma_cq *cq;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibdev);
	struct ocrdma_ucontext *uctx = NULL;
	u16 pd_id = 0;
	int status;
	struct ocrdma_create_cq_ureq ureq;

	if (attr->flags)
		return ERR_PTR(-EINVAL);

	if (udata) {
		if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
			return ERR_PTR(-EFAULT);
	} else {
		ureq.dpp_cq = 0;
	}
	cq = kzalloc(sizeof(*cq), GFP_KERNEL);
	if (!cq)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&cq->cq_lock);
	spin_lock_init(&cq->comp_handler_lock);
	INIT_LIST_HEAD(&cq->sq_head);
	INIT_LIST_HEAD(&cq->rq_head);

	if (ib_ctx) {
		uctx = get_ocrdma_ucontext(ib_ctx);
		pd_id = uctx->cntxt_pd->id;
	}

	status = ocrdma_mbx_create_cq(dev, cq, entries, ureq.dpp_cq, pd_id);
	if (status) {
		kfree(cq);
		return ERR_PTR(status);
	}
	if (ib_ctx) {
		status = ocrdma_copy_cq_uresp(dev, cq, udata, ib_ctx);
		if (status)
			goto ctx_err;
	}
	cq->phase = OCRDMA_CQE_VALID;
	dev->cq_tbl[cq->id] = cq;
	return &cq->ibcq;

ctx_err:
	ocrdma_mbx_destroy_cq(dev, cq);
	kfree(cq);
	return ERR_PTR(status);
}

int ocrdma_resize_cq(struct ib_cq *ibcq, int new_cnt,
		     struct ib_udata *udata)
{
	int status = 0;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);

	if (new_cnt < 1 || new_cnt > cq->max_hw_cqe) {
		status = -EINVAL;
		return status;
	}
	ibcq->cqe = new_cnt;
	return status;
}

static void ocrdma_flush_cq(struct ocrdma_cq *cq)
{
	int cqe_cnt;
	int valid_count = 0;
	unsigned long flags;

	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe = NULL;

	cqe = cq->va;
	cqe_cnt = cq->cqe_cnt;

	/* The last irq might have scheduled a polling thread;
	 * sync up with it before hard flushing.
	 */
	spin_lock_irqsave(&cq->cq_lock, flags);
	while (cqe_cnt) {
		if (is_cqe_valid(cq, cqe))
			valid_count++;
		cqe++;
		cqe_cnt--;
	}
	ocrdma_ring_cq_db(dev, cq->id, false, false, valid_count);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
}

int ocrdma_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_eq *eq = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int pdid = 0;
	u32 irq, indx;

	dev->cq_tbl[cq->id] = NULL;
	indx = ocrdma_get_eq_table_index(dev, cq->eqn);
	BUG_ON(indx == -EINVAL);

	eq = &dev->eq_tbl[indx];
	irq = ocrdma_get_irq(dev, eq);
	synchronize_irq(irq);
	ocrdma_flush_cq(cq);

	(void)ocrdma_mbx_destroy_cq(dev, cq);
	if (cq->ucontext) {
		pdid = cq->ucontext->cntxt_pd->id;
		ocrdma_del_mmap(cq->ucontext, (u64) cq->pa,
				PAGE_ALIGN(cq->len));
		ocrdma_del_mmap(cq->ucontext,
				ocrdma_get_db_addr(dev, pdid),
				dev->nic_info.db_page_size);
	}

	kfree(cq);
	return 0;
}

static int ocrdma_add_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -EINVAL;

	if (qp->id < OCRDMA_MAX_QP && dev->qp_tbl[qp->id] == NULL) {
		dev->qp_tbl[qp->id] = qp;
		status = 0;
	}
	return status;
}

static void ocrdma_del_qpn_map(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	dev->qp_tbl[qp->id] = NULL;
}

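/* Validate QP creation parameters against device limits. GSI (QP1) is
 * special-cased twice: its send-queue depth check is skipped to allow
 * a CM size of 128, and only one GSI QP may exist per device.
 */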
static int ocrdma_check_qp_params(struct ib_pd *ibpd, struct ocrdma_dev *dev,
				  struct ib_qp_init_attr *attrs,
				  struct ib_udata *udata)
{
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->qp_type != IB_QPT_RC) &&
	    (attrs->qp_type != IB_QPT_UC) &&
	    (attrs->qp_type != IB_QPT_UD)) {
		pr_err("%s(%d) unsupported qp type=0x%x requested\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* Skip the check for QP1 to support CM size of 128 */
	if ((attrs->qp_type != IB_QPT_GSI) &&
	    (attrs->cap.max_send_wr > dev->attr.max_wqe)) {
		pr_err("%s(%d) unsupported send_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_wr);
		pr_err("%s(%d) supported send_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_wqe);
		return -EINVAL;
	}
	if (!attrs->srq && (attrs->cap.max_recv_wr > dev->attr.max_rqe)) {
		pr_err("%s(%d) unsupported recv_wr=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_wr);
		pr_err("%s(%d) supported recv_wr=0x%x\n",
		       __func__, dev->id, dev->attr.max_rqe);
		return -EINVAL;
	}
	if (attrs->cap.max_inline_data > dev->attr.max_inline_data) {
		pr_err("%s(%d) unsupported inline data size=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_inline_data);
		pr_err("%s(%d) supported inline data size=0x%x\n",
		       __func__, dev->id, dev->attr.max_inline_data);
		return -EINVAL;
	}
	if (attrs->cap.max_send_sge > dev->attr.max_send_sge) {
		pr_err("%s(%d) unsupported send_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_send_sge);
		pr_err("%s(%d) supported send_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_send_sge);
		return -EINVAL;
	}
	if (attrs->cap.max_recv_sge > dev->attr.max_recv_sge) {
		pr_err("%s(%d) unsupported recv_sge=0x%x requested\n",
		       __func__, dev->id, attrs->cap.max_recv_sge);
		pr_err("%s(%d) supported recv_sge=0x%x\n",
		       __func__, dev->id, dev->attr.max_recv_sge);
		return -EINVAL;
	}
	/* unprivileged user space cannot create special QP */
	if (udata && attrs->qp_type == IB_QPT_GSI) {
		pr_err("%s(%d) Userspace can't create special QPs of type=0x%x\n",
		       __func__, dev->id, attrs->qp_type);
		return -EINVAL;
	}
	/* allow creating only one GSI type of QP */
	if (attrs->qp_type == IB_QPT_GSI && dev->gsi_qp_created) {
		pr_err("%s(%d) GSI special QPs already created.\n",
		       __func__, dev->id);
		return -EINVAL;
	}
	/* verify consumer QPs are not trying to use GSI QP's CQ */
	if ((attrs->qp_type != IB_QPT_GSI) && (dev->gsi_qp_created)) {
		if ((dev->gsi_sqcq == get_ocrdma_cq(attrs->send_cq)) ||
		    (dev->gsi_rqcq == get_ocrdma_cq(attrs->recv_cq))) {
			pr_err("%s(%d) Consumer QP cannot use GSI CQs.\n",
			       __func__, dev->id);
			return -EINVAL;
		}
	}
	return 0;
}

static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
				struct ib_udata *udata, int dpp_offset,
				int dpp_credit_lmt, int srq)
{
	int status;
	u64 usr_db;
	struct ocrdma_create_qp_uresp uresp;
	struct ocrdma_pd *pd = qp->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	memset(&uresp, 0, sizeof(uresp));
	usr_db = dev->nic_info.unmapped_db +
			(pd->id * dev->nic_info.db_page_size);
	uresp.qp_id = qp->id;
	uresp.sq_dbid = qp->sq.dbid;
	uresp.num_sq_pages = 1;
	uresp.sq_page_size = PAGE_ALIGN(qp->sq.len);
	uresp.sq_page_addr[0] = virt_to_phys(qp->sq.va);
	uresp.num_wqe_allocated = qp->sq.max_cnt;
	if (!srq) {
		uresp.rq_dbid = qp->rq.dbid;
		uresp.num_rq_pages = 1;
		uresp.rq_page_size = PAGE_ALIGN(qp->rq.len);
		uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va);
		uresp.num_rqe_allocated = qp->rq.max_cnt;
	}
	uresp.db_page_addr = usr_db;
	uresp.db_page_size = dev->nic_info.db_page_size;
	uresp.db_sq_offset = OCRDMA_DB_GEN2_SQ_OFFSET;
	uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
	uresp.db_shift = OCRDMA_DB_RQ_SHIFT;

	if (qp->dpp_enabled) {
		uresp.dpp_credit = dpp_credit_lmt;
		uresp.dpp_offset = dpp_offset;
	}
	status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
	if (status) {
		pr_err("%s(%d) user copy error.\n", __func__, dev->id);
		goto err;
	}
	status = ocrdma_add_mmap(pd->uctx, uresp.sq_page_addr[0],
				 uresp.sq_page_size);
	if (status)
		goto err;

	if (!srq) {
		status = ocrdma_add_mmap(pd->uctx, uresp.rq_page_addr[0],
					 uresp.rq_page_size);
		if (status)
			goto rq_map_err;
	}
	return status;
rq_map_err:
	ocrdma_del_mmap(pd->uctx, uresp.sq_page_addr[0], uresp.sq_page_size);
err:
	return status;
}

static void ocrdma_set_qp_db(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			     struct ocrdma_pd *pd)
{
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_GEN2_RQ_OFFSET;
	} else {
		qp->sq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_SQ_OFFSET;
		qp->rq_db = dev->nic_info.db +
			(pd->id * dev->nic_info.db_page_size) +
			OCRDMA_DB_RQ_OFFSET;
	}
}

static int ocrdma_alloc_wr_id_tbl(struct ocrdma_qp *qp)
{
	qp->wqe_wr_id_tbl =
	    kcalloc(qp->sq.max_cnt, sizeof(*(qp->wqe_wr_id_tbl)),
		    GFP_KERNEL);
	if (qp->wqe_wr_id_tbl == NULL)
		return -ENOMEM;
	qp->rqe_wr_id_tbl =
	    kcalloc(qp->rq.max_cnt, sizeof(u64), GFP_KERNEL);
	if (qp->rqe_wr_id_tbl == NULL)
		return -ENOMEM;

	return 0;
}

static void ocrdma_set_qp_init_params(struct ocrdma_qp *qp,
				      struct ocrdma_pd *pd,
				      struct ib_qp_init_attr *attrs)
{
	qp->pd = pd;
	spin_lock_init(&qp->q_lock);
	INIT_LIST_HEAD(&qp->sq_entry);
	INIT_LIST_HEAD(&qp->rq_entry);

	qp->qp_type = attrs->qp_type;
	qp->cap_flags = OCRDMA_QP_INB_RD | OCRDMA_QP_INB_WR;
	qp->max_inline_data = attrs->cap.max_inline_data;
	qp->sq.max_sges = attrs->cap.max_send_sge;
	qp->rq.max_sges = attrs->cap.max_recv_sge;
	qp->state = OCRDMA_QPS_RST;
	qp->signaled = (attrs->sq_sig_type == IB_SIGNAL_ALL_WR) ? true : false;
}

static void ocrdma_store_gsi_qp_cq(struct ocrdma_dev *dev,
				   struct ib_qp_init_attr *attrs)
{
	if (attrs->qp_type == IB_QPT_GSI) {
		dev->gsi_qp_created = 1;
		dev->gsi_sqcq = get_ocrdma_cq(attrs->send_cq);
		dev->gsi_rqcq = get_ocrdma_cq(attrs->recv_cq);
	}
}

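/* QP creation: validate the request, create the QP through the mailbox
 * (optionally DPP-enabled), register it in the qpn map used during CQE
 * processing, and for user QPs copy the queue and doorbell layout back
 * to the library, which manages the wr_id tables itself.
 */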
1322struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
1323 struct ib_qp_init_attr *attrs,
1324 struct ib_udata *udata)
1325{
1326 int status;
1327 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
1328 struct ocrdma_qp *qp;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301329 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301330 struct ocrdma_create_qp_ureq ureq;
1331 u16 dpp_credit_lmt, dpp_offset;
1332
Shamir Rabinovitche00b64f2018-12-17 17:15:18 +02001333 status = ocrdma_check_qp_params(ibpd, dev, attrs, udata);
Parav Panditfe2caef2012-03-21 04:09:06 +05301334 if (status)
1335 goto gen_err;
1336
1337 memset(&ureq, 0, sizeof(ureq));
1338 if (udata) {
1339 if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
1340 return ERR_PTR(-EFAULT);
1341 }
1342 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1343 if (!qp) {
1344 status = -ENOMEM;
1345 goto gen_err;
1346 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301347 ocrdma_set_qp_init_params(qp, pd, attrs);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301348 if (udata == NULL)
1349 qp->cap_flags |= (OCRDMA_QP_MW_BIND | OCRDMA_QP_LKEY0 |
1350 OCRDMA_QP_FAST_REG);
Parav Panditfe2caef2012-03-21 04:09:06 +05301351
1352 mutex_lock(&dev->dev_lock);
1353 status = ocrdma_mbx_create_qp(qp, attrs, ureq.enable_dpp_cq,
1354 ureq.dpp_cq_id,
1355 &dpp_offset, &dpp_credit_lmt);
1356 if (status)
1357 goto mbx_err;
1358
1359 /* user space QP's wr_id table are managed in library */
1360 if (udata == NULL) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301361 status = ocrdma_alloc_wr_id_tbl(qp);
1362 if (status)
1363 goto map_err;
1364 }
1365
1366 status = ocrdma_add_qpn_map(dev, qp);
1367 if (status)
1368 goto map_err;
1369 ocrdma_set_qp_db(dev, qp, pd);
1370 if (udata) {
1371 status = ocrdma_copy_qp_uresp(qp, udata, dpp_offset,
1372 dpp_credit_lmt,
1373 (attrs->srq != NULL));
1374 if (status)
1375 goto cpy_err;
1376 }
1377 ocrdma_store_gsi_qp_cq(dev, attrs);
Gottumukkala, Naresh27159f52013-06-05 08:50:46 +00001378 qp->ibqp.qp_num = qp->id;
Parav Panditfe2caef2012-03-21 04:09:06 +05301379 mutex_unlock(&dev->dev_lock);
1380 return &qp->ibqp;
1381
1382cpy_err:
1383 ocrdma_del_qpn_map(dev, qp);
1384map_err:
1385 ocrdma_mbx_destroy_qp(dev, qp);
1386mbx_err:
1387 mutex_unlock(&dev->dev_lock);
1388 kfree(qp->wqe_wr_id_tbl);
1389 kfree(qp->rqe_wr_id_tbl);
1390 kfree(qp);
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001391 pr_err("%s(%d) error=%d\n", __func__, dev->id, status);
Parav Panditfe2caef2012-03-21 04:09:06 +05301392gen_err:
1393 return ERR_PTR(status);
1394}
1395
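/* Apply a QP attribute change without the ib_modify_qp_is_ok()
 * validation done in ocrdma_modify_qp(): update the driver QP state
 * machine when IB_QP_STATE is set, then push the attributes to the
 * firmware through the mailbox. ocrdma_destroy_qp() calls this
 * directly to force a QP into the error state.
 */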
1396int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1397 int attr_mask)
1398{
1399 int status = 0;
1400 struct ocrdma_qp *qp;
1401 struct ocrdma_dev *dev;
1402 enum ib_qp_state old_qps;
1403
1404 qp = get_ocrdma_qp(ibqp);
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301405 dev = get_ocrdma_dev(ibqp->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301406 if (attr_mask & IB_QP_STATE)
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05301407 status = ocrdma_qp_state_change(qp, attr->qp_state, &old_qps);
Parav Panditfe2caef2012-03-21 04:09:06 +05301408	/* if the new and previous states are the same, the hw doesn't
1409	 * need to be told about it.
1410	 */
1411 if (status < 0)
1412 return status;
Markus Elfring95f60bb2015-12-26 18:40:43 +01001413 return ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask);
Parav Panditfe2caef2012-03-21 04:09:06 +05301414}
1415
1416int ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1417 int attr_mask, struct ib_udata *udata)
1418{
1419 unsigned long flags;
1420 int status = -EINVAL;
1421 struct ocrdma_qp *qp;
1422 struct ocrdma_dev *dev;
1423 enum ib_qp_state old_qps, new_qps;
1424
1425 qp = get_ocrdma_qp(ibqp);
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301426 dev = get_ocrdma_dev(ibqp->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301427
1428	/* synchronize with multiple contexts trying to change or retrieve the qp state */
1429 mutex_lock(&dev->dev_lock);
1430	/* synchronize with the wqe/rqe posting and cqe processing contexts */
1431 spin_lock_irqsave(&qp->q_lock, flags);
1432 old_qps = get_ibqp_state(qp->state);
1433 if (attr_mask & IB_QP_STATE)
1434 new_qps = attr->qp_state;
1435 else
1436 new_qps = old_qps;
1437 spin_unlock_irqrestore(&qp->q_lock, flags);
1438
Kamal Heibd31131b2018-10-02 16:11:21 +03001439 if (!ib_modify_qp_is_ok(old_qps, new_qps, ibqp->qp_type, attr_mask)) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001440 pr_err("%s(%d) invalid attribute mask=0x%x specified for\n"
1441 "qpn=0x%x of type=0x%x old_qps=0x%x, new_qps=0x%x\n",
1442 __func__, dev->id, attr_mask, qp->id, ibqp->qp_type,
1443 old_qps, new_qps);
Parav Panditfe2caef2012-03-21 04:09:06 +05301444 goto param_err;
1445 }
1446
1447 status = _ocrdma_modify_qp(ibqp, attr, attr_mask);
1448 if (status > 0)
1449 status = 0;
1450param_err:
1451 mutex_unlock(&dev->dev_lock);
1452 return status;
1453}
1454
1455static enum ib_mtu ocrdma_mtu_int_to_enum(u16 mtu)
1456{
1457 switch (mtu) {
1458 case 256:
1459 return IB_MTU_256;
1460 case 512:
1461 return IB_MTU_512;
1462 case 1024:
1463 return IB_MTU_1024;
1464 case 2048:
1465 return IB_MTU_2048;
1466 case 4096:
1467 return IB_MTU_4096;
1468 default:
1469 return IB_MTU_1024;
1470 }
1471}
1472
1473static int ocrdma_to_ib_qp_acc_flags(int qp_cap_flags)
1474{
1475 int ib_qp_acc_flags = 0;
1476
1477 if (qp_cap_flags & OCRDMA_QP_INB_WR)
1478 ib_qp_acc_flags |= IB_ACCESS_REMOTE_WRITE;
1479 if (qp_cap_flags & OCRDMA_QP_INB_RD)
1480 ib_qp_acc_flags |= IB_ACCESS_LOCAL_WRITE;
1481 return ib_qp_acc_flags;
1482}
1483
1484int ocrdma_query_qp(struct ib_qp *ibqp,
1485 struct ib_qp_attr *qp_attr,
1486 int attr_mask, struct ib_qp_init_attr *qp_init_attr)
1487{
1488 int status;
1489 u32 qp_state;
1490 struct ocrdma_qp_params params;
1491 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301492 struct ocrdma_dev *dev = get_ocrdma_dev(ibqp->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301493
1494 memset(&params, 0, sizeof(params));
1495 mutex_lock(&dev->dev_lock);
1496 status = ocrdma_mbx_query_qp(dev, qp, &params);
1497 mutex_unlock(&dev->dev_lock);
1498 if (status)
1499 goto mbx_err;
Mitesh Ahuja95bf0092014-12-03 11:36:33 +05301500 if (qp->qp_type == IB_QPT_UD)
1501 qp_attr->qkey = params.qkey;
Parav Panditfe2caef2012-03-21 04:09:06 +05301502 qp_attr->path_mtu =
1503 ocrdma_mtu_int_to_enum(params.path_mtu_pkey_indx &
1504 OCRDMA_QP_PARAMS_PATH_MTU_MASK) >>
1505 OCRDMA_QP_PARAMS_PATH_MTU_SHIFT;
1506 qp_attr->path_mig_state = IB_MIG_MIGRATED;
1507 qp_attr->rq_psn = params.hop_lmt_rq_psn & OCRDMA_QP_PARAMS_RQ_PSN_MASK;
1508 qp_attr->sq_psn = params.tclass_sq_psn & OCRDMA_QP_PARAMS_SQ_PSN_MASK;
1509 qp_attr->dest_qp_num =
1510 params.ack_to_rnr_rtc_dest_qpn & OCRDMA_QP_PARAMS_DEST_QPN_MASK;
1511
1512 qp_attr->qp_access_flags = ocrdma_to_ib_qp_acc_flags(qp->cap_flags);
1513 qp_attr->cap.max_send_wr = qp->sq.max_cnt - 1;
1514 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1;
1515 qp_attr->cap.max_send_sge = qp->sq.max_sges;
1516 qp_attr->cap.max_recv_sge = qp->rq.max_sges;
Naresh Gottumukkalac43e9ab2013-08-26 15:27:46 +05301517 qp_attr->cap.max_inline_data = qp->max_inline_data;
Parav Panditfe2caef2012-03-21 04:09:06 +05301518 qp_init_attr->cap = qp_attr->cap;
Dasaratharaman Chandramouli44c58482017-04-29 14:41:29 -04001519 qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
Parav Panditfe2caef2012-03-21 04:09:06 +05301520
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001521 rdma_ah_set_grh(&qp_attr->ah_attr, NULL,
1522 params.rnt_rc_sl_fl &
1523 OCRDMA_QP_PARAMS_FLOW_LABEL_MASK,
1524 qp->sgid_idx,
1525 (params.hop_lmt_rq_psn &
1526 OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
1527 OCRDMA_QP_PARAMS_HOP_LMT_SHIFT,
1528 (params.tclass_sq_psn &
1529 OCRDMA_QP_PARAMS_TCLASS_MASK) >>
1530 OCRDMA_QP_PARAMS_TCLASS_SHIFT);
1531 rdma_ah_set_dgid_raw(&qp_attr->ah_attr, &params.dgid[0]);
1532
1533 rdma_ah_set_port_num(&qp_attr->ah_attr, 1);
1534 rdma_ah_set_sl(&qp_attr->ah_attr, (params.rnt_rc_sl_fl &
1535 OCRDMA_QP_PARAMS_SL_MASK) >>
1536 OCRDMA_QP_PARAMS_SL_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05301537 qp_attr->timeout = (params.ack_to_rnr_rtc_dest_qpn &
1538 OCRDMA_QP_PARAMS_ACK_TIMEOUT_MASK) >>
1539 OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
1540 qp_attr->rnr_retry = (params.ack_to_rnr_rtc_dest_qpn &
1541 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK) >>
1542 OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT;
1543 qp_attr->retry_cnt =
1544 (params.rnt_rc_sl_fl & OCRDMA_QP_PARAMS_RETRY_CNT_MASK) >>
1545 OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT;
1546 qp_attr->min_rnr_timer = 0;
1547 qp_attr->pkey_index = 0;
1548 qp_attr->port_num = 1;
Dasaratharaman Chandramoulid8966fc2017-04-29 14:41:28 -04001549 rdma_ah_set_path_bits(&qp_attr->ah_attr, 0);
1550 rdma_ah_set_static_rate(&qp_attr->ah_attr, 0);
Parav Panditfe2caef2012-03-21 04:09:06 +05301551 qp_attr->alt_pkey_index = 0;
1552 qp_attr->alt_port_num = 0;
1553 qp_attr->alt_timeout = 0;
1554 memset(&qp_attr->alt_ah_attr, 0, sizeof(qp_attr->alt_ah_attr));
1555 qp_state = (params.max_sge_recv_flags & OCRDMA_QP_PARAMS_STATE_MASK) >>
1556 OCRDMA_QP_PARAMS_STATE_SHIFT;
Padmanabh Ratnakar43c706b2014-12-18 14:13:00 +05301557 qp_attr->qp_state = get_ibqp_state(qp_state);
1558 qp_attr->cur_qp_state = qp_attr->qp_state;
Parav Panditfe2caef2012-03-21 04:09:06 +05301559 qp_attr->sq_draining = (qp_state == OCRDMA_QPS_SQ_DRAINING) ? 1 : 0;
1560 qp_attr->max_dest_rd_atomic =
1561 params.max_ord_ird >> OCRDMA_QP_PARAMS_MAX_ORD_SHIFT;
1562 qp_attr->max_rd_atomic =
1563 params.max_ord_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK;
1564 qp_attr->en_sqd_async_notify = (params.max_sge_recv_flags &
1565 OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC) ? 1 : 0;
Padmanabh Ratnakar43c706b2014-12-18 14:13:00 +05301566 /* Sync driver QP state with FW */
1567 ocrdma_qp_state_change(qp, qp_attr->qp_state, NULL);
Parav Panditfe2caef2012-03-21 04:09:06 +05301568mbx_err:
1569 return status;
1570}
1571
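/* The SRQ keeps a bitmap over its wr_id shadow table
 * (idx_bit_fields); a set bit marks a free slot. Toggling flips a
 * slot between free and in-use: ocrdma_srq_get_idx() clears a bit on
 * allocation, and the bit is set again when the matching CQE is
 * reaped.
 */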
Rasmus Villemoesf3070e72015-01-16 15:39:56 +01001572static void ocrdma_srq_toggle_bit(struct ocrdma_srq *srq, unsigned int idx)
Parav Panditfe2caef2012-03-21 04:09:06 +05301573{
Rasmus Villemoesf3070e72015-01-16 15:39:56 +01001574 unsigned int i = idx / 32;
1575 u32 mask = (1U << (idx % 32));
Parav Panditfe2caef2012-03-21 04:09:06 +05301576
Rasmus Villemoesba64fdc2015-01-16 15:39:55 +01001577 srq->idx_bit_fields[i] ^= mask;
Parav Panditfe2caef2012-03-21 04:09:06 +05301578}
1579
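/* Number of free slots in a circular hardware queue; head is the
 * producer index and tail the consumer. One slot is always left
 * unused so that a full queue can be distinguished from an empty one.
 */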
1580static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1581{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301582 return ((q->max_wqe_idx - q->head) + q->tail) % q->max_cnt;
Parav Panditfe2caef2012-03-21 04:09:06 +05301583}
1584
1585static int is_hw_sq_empty(struct ocrdma_qp *qp)
1586{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301587 return (qp->sq.tail == qp->sq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301588}
1589
1590static int is_hw_rq_empty(struct ocrdma_qp *qp)
1591{
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301592 return (qp->rq.tail == qp->rq.head);
Parav Panditfe2caef2012-03-21 04:09:06 +05301593}
1594
1595static void *ocrdma_hwq_head(struct ocrdma_qp_hwq_info *q)
1596{
1597 return q->va + (q->head * q->entry_size);
1598}
1599
1600static void *ocrdma_hwq_head_from_idx(struct ocrdma_qp_hwq_info *q,
1601 u32 idx)
1602{
1603 return q->va + (idx * q->entry_size);
1604}
1605
1606static void ocrdma_hwq_inc_head(struct ocrdma_qp_hwq_info *q)
1607{
1608 q->head = (q->head + 1) & q->max_wqe_idx;
1609}
1610
1611static void ocrdma_hwq_inc_tail(struct ocrdma_qp_hwq_info *q)
1612{
1613 q->tail = (q->tail + 1) & q->max_wqe_idx;
1614}
1615
1616/* discard the cqe for a given QP */
1617static void ocrdma_discard_cqes(struct ocrdma_qp *qp, struct ocrdma_cq *cq)
1618{
1619 unsigned long cq_flags;
1620 unsigned long flags;
1621 int discard_cnt = 0;
1622 u32 cur_getp, stop_getp;
1623 struct ocrdma_cqe *cqe;
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301624 u32 qpn = 0, wqe_idx = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301625
1626 spin_lock_irqsave(&cq->cq_lock, cq_flags);
1627
1628	/* traverse the CQEs in the hw CQ,
1629	 * find the CQEs matching the given qp and
1630	 * mark them discarded by clearing the qpn.
1631	 * the doorbell is rung in poll_cq() since
1632	 * we don't complete cqes out of order.
1633	 */
1634
1635 cur_getp = cq->getp;
1636	/* find up to where we reap the cq. */
1637 stop_getp = cur_getp;
1638 do {
1639 if (is_hw_sq_empty(qp) && (!qp->srq && is_hw_rq_empty(qp)))
1640 break;
1641
1642 cqe = cq->va + cur_getp;
1643	/* exit when (a) we are done reaping the whole hw cq, or
1644	 * (b) the qp's sq and rq become empty; (a) is checked by
1645	 * the loop condition and (b) by the break above.
1646	 */
1647 qpn = cqe->cmn.qpn & OCRDMA_CQE_QPN_MASK;
1648 /* if previously discarded cqe found, skip that too. */
1649 /* check for matching qp */
1650 if (qpn == 0 || qpn != qp->id)
1651 goto skip_cqe;
1652
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301653 if (is_cqe_for_sq(cqe)) {
Parav Panditfe2caef2012-03-21 04:09:06 +05301654 ocrdma_hwq_inc_tail(&qp->sq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301655 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301656 if (qp->srq) {
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301657 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
1658 OCRDMA_CQE_BUFTAG_SHIFT) &
1659 qp->srq->rq.max_wqe_idx;
ssh10db287ec2016-12-24 21:50:06 +05301660 BUG_ON(wqe_idx < 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05301661 spin_lock_irqsave(&qp->srq->q_lock, flags);
1662 ocrdma_hwq_inc_tail(&qp->srq->rq);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301663 ocrdma_srq_toggle_bit(qp->srq, wqe_idx - 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05301664 spin_unlock_irqrestore(&qp->srq->q_lock, flags);
1665
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301666 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05301667 ocrdma_hwq_inc_tail(&qp->rq);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301668 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301669 }
Selvin Xaviercf5788a2014-02-04 11:57:03 +05301670 /* mark cqe discarded so that it is not picked up later
1671 * in the poll_cq().
1672 */
1673 discard_cnt += 1;
1674 cqe->cmn.qpn = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301675skip_cqe:
1676 cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
1677 } while (cur_getp != stop_getp);
1678 spin_unlock_irqrestore(&cq->cq_lock, cq_flags);
1679}
1680
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301681void ocrdma_del_flush_qp(struct ocrdma_qp *qp)
Parav Panditfe2caef2012-03-21 04:09:06 +05301682{
1683 int found = false;
1684 unsigned long flags;
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301685 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301686	/* sync with any active CQ poll */
1688 spin_lock_irqsave(&dev->flush_q_lock, flags);
1689 found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
1690 if (found)
1691 list_del(&qp->sq_entry);
1692 if (!qp->srq) {
1693 found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
1694 if (found)
1695 list_del(&qp->rq_entry);
1696 }
1697 spin_unlock_irqrestore(&dev->flush_q_lock, flags);
1698}
1699
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001700int ocrdma_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301701{
Parav Panditfe2caef2012-03-21 04:09:06 +05301702 struct ocrdma_pd *pd;
1703 struct ocrdma_qp *qp;
1704 struct ocrdma_dev *dev;
1705 struct ib_qp_attr attrs;
Devesh Sharmafe488222015-05-19 11:32:34 +05301706 int attr_mask;
Dan Carpenterd19081e2012-05-02 09:14:47 +03001707 unsigned long flags;
Parav Panditfe2caef2012-03-21 04:09:06 +05301708
1709 qp = get_ocrdma_qp(ibqp);
Mitesh Ahujad2b8f7b2014-12-18 14:13:06 +05301710 dev = get_ocrdma_dev(ibqp->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301711
Parav Panditfe2caef2012-03-21 04:09:06 +05301712 pd = qp->pd;
1713
1714 /* change the QP state to ERROR */
Devesh Sharmafe488222015-05-19 11:32:34 +05301715 if (qp->state != OCRDMA_QPS_RST) {
1716 attrs.qp_state = IB_QPS_ERR;
1717 attr_mask = IB_QP_STATE;
1718 _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
1719 }
Parav Panditfe2caef2012-03-21 04:09:06 +05301720	/* ensure that CQEs for a newly created QP (whose id may be the
1721	 * same as that of the QP just being destroyed) don't get
1722	 * discarded until the old CQEs are discarded.
1723	 */
1724 mutex_lock(&dev->dev_lock);
Mitesh Ahuja4b8180a2014-12-18 14:13:01 +05301725 (void) ocrdma_mbx_destroy_qp(dev, qp);
Parav Panditfe2caef2012-03-21 04:09:06 +05301726
1727 /*
1728 * acquire CQ lock while destroy is in progress, in order to
1729	 * protect against processing in-flight CQEs for this QP.
1730 */
Dan Carpenterd19081e2012-05-02 09:14:47 +03001731 spin_lock_irqsave(&qp->sq_cq->cq_lock, flags);
Bart Van Asschebeae9eb2018-07-06 13:04:30 -07001732 if (qp->rq_cq && (qp->rq_cq != qp->sq_cq)) {
Dan Carpenterd19081e2012-05-02 09:14:47 +03001733 spin_lock(&qp->rq_cq->cq_lock);
Bart Van Asschebeae9eb2018-07-06 13:04:30 -07001734 ocrdma_del_qpn_map(dev, qp);
Dan Carpenterd19081e2012-05-02 09:14:47 +03001735 spin_unlock(&qp->rq_cq->cq_lock);
Bart Van Asschebeae9eb2018-07-06 13:04:30 -07001736 } else {
1737 ocrdma_del_qpn_map(dev, qp);
1738 }
Dan Carpenterd19081e2012-05-02 09:14:47 +03001739 spin_unlock_irqrestore(&qp->sq_cq->cq_lock, flags);
Parav Panditfe2caef2012-03-21 04:09:06 +05301740
1741 if (!pd->uctx) {
1742 ocrdma_discard_cqes(qp, qp->sq_cq);
1743 ocrdma_discard_cqes(qp, qp->rq_cq);
1744 }
1745 mutex_unlock(&dev->dev_lock);
1746
1747 if (pd->uctx) {
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301748 ocrdma_del_mmap(pd->uctx, (u64) qp->sq.pa,
1749 PAGE_ALIGN(qp->sq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301750 if (!qp->srq)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301751 ocrdma_del_mmap(pd->uctx, (u64) qp->rq.pa,
1752 PAGE_ALIGN(qp->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301753 }
1754
1755 ocrdma_del_flush_qp(qp);
1756
Parav Panditfe2caef2012-03-21 04:09:06 +05301757 kfree(qp->wqe_wr_id_tbl);
1758 kfree(qp->rqe_wr_id_tbl);
1759 kfree(qp);
Mitesh Ahuja4b8180a2014-12-18 14:13:01 +05301760 return 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05301761}
1762
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301763static int ocrdma_copy_srq_uresp(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
1764 struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301765{
1766 int status;
1767 struct ocrdma_create_srq_uresp uresp;
1768
Dan Carpenter63ea3742013-07-29 22:34:29 +03001769 memset(&uresp, 0, sizeof(uresp));
Parav Panditfe2caef2012-03-21 04:09:06 +05301770 uresp.rq_dbid = srq->rq.dbid;
1771 uresp.num_rq_pages = 1;
Devesh Sharma1b76d382014-09-05 19:35:40 +05301772 uresp.rq_page_addr[0] = virt_to_phys(srq->rq.va);
Parav Panditfe2caef2012-03-21 04:09:06 +05301773 uresp.rq_page_size = srq->rq.len;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301774 uresp.db_page_addr = dev->nic_info.unmapped_db +
1775 (srq->pd->id * dev->nic_info.db_page_size);
1776 uresp.db_page_size = dev->nic_info.db_page_size;
Parav Panditfe2caef2012-03-21 04:09:06 +05301777 uresp.num_rqe_allocated = srq->rq.max_cnt;
Devesh Sharma21c33912014-02-04 11:56:56 +05301778 if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
Naresh Gottumukkalaf11220e2013-08-26 15:27:42 +05301779 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ_OFFSET;
Parav Panditfe2caef2012-03-21 04:09:06 +05301780 uresp.db_shift = 24;
1781 } else {
1782 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
1783 uresp.db_shift = 16;
1784 }
1785
1786 status = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
1787 if (status)
1788 return status;
1789	return ocrdma_add_mmap(srq->pd->uctx, uresp.rq_page_addr[0],
1790			       uresp.rq_page_size);
1794}
1795
1796struct ib_srq *ocrdma_create_srq(struct ib_pd *ibpd,
1797 struct ib_srq_init_attr *init_attr,
1798 struct ib_udata *udata)
1799{
1800 int status = -ENOMEM;
1801 struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05301802 struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301803 struct ocrdma_srq *srq;
1804
1805 if (init_attr->attr.max_sge > dev->attr.max_recv_sge)
1806 return ERR_PTR(-EINVAL);
1807 if (init_attr->attr.max_wr > dev->attr.max_rqe)
1808 return ERR_PTR(-EINVAL);
1809
1810 srq = kzalloc(sizeof(*srq), GFP_KERNEL);
1811 if (!srq)
1812 return ERR_PTR(status);
1813
1814 spin_lock_init(&srq->q_lock);
Parav Panditfe2caef2012-03-21 04:09:06 +05301815 srq->pd = pd;
1816 srq->db = dev->nic_info.db + (pd->id * dev->nic_info.db_page_size);
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301817 status = ocrdma_mbx_create_srq(dev, srq, init_attr, pd);
Parav Panditfe2caef2012-03-21 04:09:06 +05301818 if (status)
1819 goto err;
1820
1821 if (udata == NULL) {
Dan Carpenterf0c6e882017-07-13 10:46:49 +03001822 status = -ENOMEM;
Kees Cook6396bb22018-06-12 14:03:40 -07001823 srq->rqe_wr_id_tbl = kcalloc(srq->rq.max_cnt, sizeof(u64),
1824 GFP_KERNEL);
Parav Panditfe2caef2012-03-21 04:09:06 +05301825 if (srq->rqe_wr_id_tbl == NULL)
1826 goto arm_err;
1827
1828 srq->bit_fields_len = (srq->rq.max_cnt / 32) +
1829 (srq->rq.max_cnt % 32 ? 1 : 0);
1830 srq->idx_bit_fields =
Kees Cook6da2ec52018-06-12 13:55:00 -07001831 kmalloc_array(srq->bit_fields_len, sizeof(u32),
1832 GFP_KERNEL);
Parav Panditfe2caef2012-03-21 04:09:06 +05301833 if (srq->idx_bit_fields == NULL)
1834 goto arm_err;
1835 memset(srq->idx_bit_fields, 0xff,
1836 srq->bit_fields_len * sizeof(u32));
1837 }
1838
1839 if (init_attr->attr.srq_limit) {
1840 status = ocrdma_mbx_modify_srq(srq, &init_attr->attr);
1841 if (status)
1842 goto arm_err;
1843 }
1844
Parav Panditfe2caef2012-03-21 04:09:06 +05301845 if (udata) {
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301846 status = ocrdma_copy_srq_uresp(dev, srq, udata);
Parav Panditfe2caef2012-03-21 04:09:06 +05301847 if (status)
1848 goto arm_err;
1849 }
1850
Parav Panditfe2caef2012-03-21 04:09:06 +05301851 return &srq->ibsrq;
1852
1853arm_err:
1854 ocrdma_mbx_destroy_srq(dev, srq);
1855err:
1856 kfree(srq->rqe_wr_id_tbl);
1857 kfree(srq->idx_bit_fields);
1858 kfree(srq);
1859 return ERR_PTR(status);
1860}
1861
1862int ocrdma_modify_srq(struct ib_srq *ibsrq,
1863 struct ib_srq_attr *srq_attr,
1864 enum ib_srq_attr_mask srq_attr_mask,
1865 struct ib_udata *udata)
1866{
Markus Elfring0ca4c392015-12-26 18:18:18 +01001867 int status;
Parav Panditfe2caef2012-03-21 04:09:06 +05301868 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301869
1870 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301871 if (srq_attr_mask & IB_SRQ_MAX_WR)
1872 status = -EINVAL;
1873 else
1874 status = ocrdma_mbx_modify_srq(srq, srq_attr);
1875 return status;
1876}
1877
1878int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
1879{
1880 int status;
1881 struct ocrdma_srq *srq;
Parav Panditfe2caef2012-03-21 04:09:06 +05301882
1883 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301884 status = ocrdma_mbx_query_srq(srq, srq_attr);
1885 return status;
1886}
1887
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03001888int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
Parav Panditfe2caef2012-03-21 04:09:06 +05301889{
1890 int status;
1891 struct ocrdma_srq *srq;
Naresh Gottumukkala1afc0452013-08-07 12:52:33 +05301892 struct ocrdma_dev *dev = get_ocrdma_dev(ibsrq->device);
Parav Panditfe2caef2012-03-21 04:09:06 +05301893
1894 srq = get_ocrdma_srq(ibsrq);
Parav Panditfe2caef2012-03-21 04:09:06 +05301895
1896 status = ocrdma_mbx_destroy_srq(dev, srq);
1897
1898 if (srq->pd->uctx)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301899 ocrdma_del_mmap(srq->pd->uctx, (u64) srq->rq.pa,
1900 PAGE_ALIGN(srq->rq.len));
Parav Panditfe2caef2012-03-21 04:09:06 +05301901
Parav Panditfe2caef2012-03-21 04:09:06 +05301902 kfree(srq->idx_bit_fields);
1903 kfree(srq->rqe_wr_id_tbl);
1904 kfree(srq);
1905 return status;
1906}
1907
1908/* unprivileged verbs and their support functions. */
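/* UD WQEs carry an extended header after the base WQE header with the
 * destination QPN, the qkey (the QP's own qkey for GSI QPs) and the
 * id of the address handle describing the destination.
 */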
1909static void ocrdma_build_ud_hdr(struct ocrdma_qp *qp,
1910 struct ocrdma_hdr_wqe *hdr,
Bart Van Asschef696bf62018-07-18 09:25:14 -07001911 const struct ib_send_wr *wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05301912{
1913 struct ocrdma_ewqe_ud_hdr *ud_hdr =
1914 (struct ocrdma_ewqe_ud_hdr *)(hdr + 1);
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001915 struct ocrdma_ah *ah = get_ocrdma_ah(ud_wr(wr)->ah);
Parav Panditfe2caef2012-03-21 04:09:06 +05301916
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001917 ud_hdr->rsvd_dest_qpn = ud_wr(wr)->remote_qpn;
Parav Panditfe2caef2012-03-21 04:09:06 +05301918 if (qp->qp_type == IB_QPT_GSI)
1919 ud_hdr->qkey = qp->qkey;
1920 else
Christoph Hellwige622f2f2015-10-08 09:16:33 +01001921 ud_hdr->qkey = ud_wr(wr)->remote_qkey;
Parav Panditfe2caef2012-03-21 04:09:06 +05301922 ud_hdr->rsvd_ahid = ah->id;
Devesh Sharma6b062662016-01-28 08:59:57 -05001923 ud_hdr->hdr_type = ah->hdr_type;
Devesh Sharma29565f22014-12-18 14:13:07 +05301924 if (ah->av->valid & OCRDMA_AV_VLAN_VALID)
1925 hdr->cw |= (OCRDMA_FLAG_AH_VLAN_PR << OCRDMA_WQE_FLAGS_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05301926}
1927
1928static void ocrdma_build_sges(struct ocrdma_hdr_wqe *hdr,
1929 struct ocrdma_sge *sge, int num_sge,
1930 struct ib_sge *sg_list)
1931{
1932 int i;
1933
1934 for (i = 0; i < num_sge; i++) {
1935 sge[i].lrkey = sg_list[i].lkey;
1936 sge[i].addr_lo = sg_list[i].addr;
1937 sge[i].addr_hi = upper_32_bits(sg_list[i].addr);
1938 sge[i].len = sg_list[i].length;
1939 hdr->total_len += sg_list[i].length;
1940 }
1941 if (num_sge == 0)
1942 memset(sge, 0, sizeof(*sge));
1943}
1944
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301945static inline uint32_t ocrdma_sglist_len(struct ib_sge *sg_list, int num_sge)
1946{
1947 uint32_t total_len = 0, i;
1948
1949 for (i = 0; i < num_sge; i++)
1950 total_len += sg_list[i].length;
1951 return total_len;
1952}
1953
1954
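/* Build the data segment of a WQE. For inline sends the payload is
 * copied straight into the WQE and the type is set to INLINE;
 * otherwise the sg_list is translated into ocrdma_sge entries and the
 * type is LKEY. Either way the total WQE size, in OCRDMA_WQE_STRIDE
 * units, is encoded into hdr->cw.
 */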
Parav Panditfe2caef2012-03-21 04:09:06 +05301955static int ocrdma_build_inline_sges(struct ocrdma_qp *qp,
1956 struct ocrdma_hdr_wqe *hdr,
1957 struct ocrdma_sge *sge,
Bart Van Asschef696bf62018-07-18 09:25:14 -07001958 const struct ib_send_wr *wr, u32 wqe_size)
Parav Panditfe2caef2012-03-21 04:09:06 +05301959{
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301960 int i;
1961 char *dpp_addr;
1962
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301963 if (wr->send_flags & IB_SEND_INLINE && qp->qp_type != IB_QPT_UD) {
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301964 hdr->total_len = ocrdma_sglist_len(wr->sg_list, wr->num_sge);
1965 if (unlikely(hdr->total_len > qp->max_inline_data)) {
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00001966 pr_err("%s() supported_len=0x%x,\n"
Masanari Iida1a84db52014-08-29 23:37:33 +09001967 " unsupported len req=0x%x\n", __func__,
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301968 qp->max_inline_data, hdr->total_len);
Parav Panditfe2caef2012-03-21 04:09:06 +05301969 return -EINVAL;
1970 }
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301971 dpp_addr = (char *)sge;
1972 for (i = 0; i < wr->num_sge; i++) {
1973 memcpy(dpp_addr,
1974 (void *)(unsigned long)wr->sg_list[i].addr,
1975 wr->sg_list[i].length);
1976 dpp_addr += wr->sg_list[i].length;
1977 }
1978
Parav Panditfe2caef2012-03-21 04:09:06 +05301979 wqe_size += roundup(hdr->total_len, OCRDMA_WQE_ALIGN_BYTES);
Naresh Gottumukkala117e6dd2013-08-26 15:27:48 +05301980 if (0 == hdr->total_len)
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05301981 wqe_size += sizeof(struct ocrdma_sge);
Parav Panditfe2caef2012-03-21 04:09:06 +05301982 hdr->cw |= (OCRDMA_TYPE_INLINE << OCRDMA_WQE_TYPE_SHIFT);
1983 } else {
1984 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
1985 if (wr->num_sge)
1986 wqe_size += (wr->num_sge * sizeof(struct ocrdma_sge));
1987 else
1988 wqe_size += sizeof(struct ocrdma_sge);
1989 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
1990 }
1991 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
1992 return 0;
1993}
1994
1995static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
Bart Van Asschef696bf62018-07-18 09:25:14 -07001996 const struct ib_send_wr *wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05301997{
1998 int status;
1999 struct ocrdma_sge *sge;
2000 u32 wqe_size = sizeof(*hdr);
2001
2002 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2003 ocrdma_build_ud_hdr(qp, hdr, wr);
2004 sge = (struct ocrdma_sge *)(hdr + 2);
2005 wqe_size += sizeof(struct ocrdma_ewqe_ud_hdr);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302006 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302007 sge = (struct ocrdma_sge *)(hdr + 1);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302008 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302009
2010 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2011 return status;
2012}
2013
2014static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
Bart Van Asschef696bf62018-07-18 09:25:14 -07002015 const struct ib_send_wr *wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05302016{
2017 int status;
2018 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2019 struct ocrdma_sge *sge = ext_rw + 1;
2020 u32 wqe_size = sizeof(*hdr) + sizeof(*ext_rw);
2021
2022 status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size);
2023 if (status)
2024 return status;
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002025 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2026 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2027 ext_rw->lrkey = rdma_wr(wr)->rkey;
Parav Panditfe2caef2012-03-21 04:09:06 +05302028 ext_rw->len = hdr->total_len;
2029 return 0;
2030}
2031
2032static void ocrdma_build_read(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr,
Bart Van Asschef696bf62018-07-18 09:25:14 -07002033 const struct ib_send_wr *wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05302034{
2035 struct ocrdma_sge *ext_rw = (struct ocrdma_sge *)(hdr + 1);
2036 struct ocrdma_sge *sge = ext_rw + 1;
2037 u32 wqe_size = ((wr->num_sge + 1) * sizeof(struct ocrdma_sge)) +
2038 sizeof(struct ocrdma_hdr_wqe);
2039
2040 ocrdma_build_sges(hdr, sge, wr->num_sge, wr->sg_list);
2041 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2042 hdr->cw |= (OCRDMA_READ << OCRDMA_WQE_OPCODE_SHIFT);
2043 hdr->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2044
Christoph Hellwige622f2f2015-10-08 09:16:33 +01002045 ext_rw->addr_lo = rdma_wr(wr)->remote_addr;
2046 ext_rw->addr_hi = upper_32_bits(rdma_wr(wr)->remote_addr);
2047 ext_rw->lrkey = rdma_wr(wr)->rkey;
Parav Panditfe2caef2012-03-21 04:09:06 +05302048 ext_rw->len = hdr->total_len;
2049}
2050
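/* Return the page size encoding used in the fast-register WQE: the
 * shift above 4K, so 4K -> 0, 8K -> 1, ..., 256M -> 16.
 */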
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302051static int get_encoded_page_size(int pg_sz)
2052{
2053	/* Max size is 256M, i.e. 4096 << 16 */
2054 int i = 0;
2055 for (; i < 17; i++)
2056 if (pg_sz == (4096 << i))
2057 break;
2058 return i;
2059}
2060
Sagi Grimberg2eaa1c52015-10-13 19:11:28 +03002061static int ocrdma_build_reg(struct ocrdma_qp *qp,
2062 struct ocrdma_hdr_wqe *hdr,
Bart Van Asschef696bf62018-07-18 09:25:14 -07002063 const struct ib_reg_wr *wr)
Sagi Grimberg2eaa1c52015-10-13 19:11:28 +03002064{
2065 u64 fbo;
2066 struct ocrdma_ewqe_fr *fast_reg = (struct ocrdma_ewqe_fr *)(hdr + 1);
2067 struct ocrdma_mr *mr = get_ocrdma_mr(wr->mr);
2068 struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
2069 struct ocrdma_pbe *pbe;
2070 u32 wqe_size = sizeof(*fast_reg) + sizeof(*hdr);
2071 int num_pbes = 0, i;
2072
2073 wqe_size = roundup(wqe_size, OCRDMA_WQE_ALIGN_BYTES);
2074
2075 hdr->cw |= (OCRDMA_FR_MR << OCRDMA_WQE_OPCODE_SHIFT);
2076 hdr->cw |= ((wqe_size / OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT);
2077
2078 if (wr->access & IB_ACCESS_LOCAL_WRITE)
2079 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_LOCAL_WR;
2080 if (wr->access & IB_ACCESS_REMOTE_WRITE)
2081 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_WR;
2082 if (wr->access & IB_ACCESS_REMOTE_READ)
2083 hdr->rsvd_lkey_flags |= OCRDMA_LKEY_FLAG_REMOTE_RD;
2084 hdr->lkey = wr->key;
2085 hdr->total_len = mr->ibmr.length;
2086
2087 fbo = mr->ibmr.iova - mr->pages[0];
2088
2089 fast_reg->va_hi = upper_32_bits(mr->ibmr.iova);
2090 fast_reg->va_lo = (u32) (mr->ibmr.iova & 0xffffffff);
2091 fast_reg->fbo_hi = upper_32_bits(fbo);
2092 fast_reg->fbo_lo = (u32) fbo & 0xffffffff;
2093 fast_reg->num_sges = mr->npages;
2094 fast_reg->size_sge = get_encoded_page_size(mr->ibmr.page_size);
2095
2096 pbe = pbl_tbl->va;
2097 for (i = 0; i < mr->npages; i++) {
2098 u64 buf_addr = mr->pages[i];
2099
2100 pbe->pa_lo = cpu_to_le32((u32) (buf_addr & PAGE_MASK));
2101 pbe->pa_hi = cpu_to_le32((u32) upper_32_bits(buf_addr));
2102 num_pbes += 1;
2103 pbe++;
2104
2105		/* if the pbl is full of stored pbes,
2106		 * move to the next pbl.
2107		 */
2108 if (num_pbes == (mr->hwmr.pbl_size/sizeof(u64))) {
2109 pbl_tbl++;
2110 pbe = (struct ocrdma_pbe *)pbl_tbl->va;
2111 }
2112 }
2113
2114 return 0;
2115}
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302116
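/* Ring the SQ doorbell: a single 32-bit write of the queue id (plus
 * the doorbell flag at OCRDMA_DB_SQ_SHIFT) to the mapped doorbell
 * page tells the adapter that a new WQE has been posted.
 */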
Parav Panditfe2caef2012-03-21 04:09:06 +05302117static void ocrdma_ring_sq_db(struct ocrdma_qp *qp)
2118{
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302119 u32 val = qp->sq.dbid | (1 << OCRDMA_DB_SQ_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302120
2121 iowrite32(val, qp->sq_db);
2122}
2123
Bart Van Assched34ac5c2018-07-18 09:25:32 -07002124int ocrdma_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
2125 const struct ib_send_wr **bad_wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05302126{
2127 int status = 0;
2128 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2129 struct ocrdma_hdr_wqe *hdr;
2130 unsigned long flags;
2131
2132 spin_lock_irqsave(&qp->q_lock, flags);
2133 if (qp->state != OCRDMA_QPS_RTS && qp->state != OCRDMA_QPS_SQD) {
2134 spin_unlock_irqrestore(&qp->q_lock, flags);
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00002135 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05302136 return -EINVAL;
2137 }
2138
2139 while (wr) {
Mitesh Ahujaf252b5d2014-06-10 19:32:20 +05302140 if (qp->qp_type == IB_QPT_UD &&
2141 (wr->opcode != IB_WR_SEND &&
2142 wr->opcode != IB_WR_SEND_WITH_IMM)) {
2143 *bad_wr = wr;
2144 status = -EINVAL;
2145 break;
2146 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302147 if (ocrdma_hwq_free_cnt(&qp->sq) == 0 ||
2148 wr->num_sge > qp->sq.max_sges) {
Naresh Gottumukkalaf6ddcf72013-06-10 04:42:40 +00002149 *bad_wr = wr;
Parav Panditfe2caef2012-03-21 04:09:06 +05302150 status = -ENOMEM;
2151 break;
2152 }
2153 hdr = ocrdma_hwq_head(&qp->sq);
2154 hdr->cw = 0;
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302155 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302156 hdr->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2157 if (wr->send_flags & IB_SEND_FENCE)
2158 hdr->cw |=
2159 (OCRDMA_FLAG_FENCE_L << OCRDMA_WQE_FLAGS_SHIFT);
2160 if (wr->send_flags & IB_SEND_SOLICITED)
2161 hdr->cw |=
2162 (OCRDMA_FLAG_SOLICIT << OCRDMA_WQE_FLAGS_SHIFT);
2163 hdr->total_len = 0;
2164 switch (wr->opcode) {
2165 case IB_WR_SEND_WITH_IMM:
2166 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2167 hdr->immdt = ntohl(wr->ex.imm_data);
Bart Van Assche705dec32017-10-11 10:49:14 -07002168 /* fall through */
Parav Panditfe2caef2012-03-21 04:09:06 +05302169 case IB_WR_SEND:
2170 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2171			status = ocrdma_build_send(qp, hdr, wr);
2172 break;
2173 case IB_WR_SEND_WITH_INV:
2174 hdr->cw |= (OCRDMA_FLAG_INV << OCRDMA_WQE_FLAGS_SHIFT);
2175 hdr->cw |= (OCRDMA_SEND << OCRDMA_WQE_OPCODE_SHIFT);
2176 hdr->lkey = wr->ex.invalidate_rkey;
2177 status = ocrdma_build_send(qp, hdr, wr);
2178 break;
2179 case IB_WR_RDMA_WRITE_WITH_IMM:
2180 hdr->cw |= (OCRDMA_FLAG_IMM << OCRDMA_WQE_FLAGS_SHIFT);
2181 hdr->immdt = ntohl(wr->ex.imm_data);
Bart Van Assche705dec32017-10-11 10:49:14 -07002182 /* fall through */
Parav Panditfe2caef2012-03-21 04:09:06 +05302183 case IB_WR_RDMA_WRITE:
2184 hdr->cw |= (OCRDMA_WRITE << OCRDMA_WQE_OPCODE_SHIFT);
2185 status = ocrdma_build_write(qp, hdr, wr);
2186 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302187 case IB_WR_RDMA_READ:
2188 ocrdma_build_read(qp, hdr, wr);
2189 break;
2190 case IB_WR_LOCAL_INV:
2191 hdr->cw |=
2192 (OCRDMA_LKEY_INV << OCRDMA_WQE_OPCODE_SHIFT);
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302193 hdr->cw |= ((sizeof(struct ocrdma_hdr_wqe) +
2194 sizeof(struct ocrdma_sge)) /
Parav Panditfe2caef2012-03-21 04:09:06 +05302195 OCRDMA_WQE_STRIDE) << OCRDMA_WQE_SIZE_SHIFT;
2196 hdr->lkey = wr->ex.invalidate_rkey;
2197 break;
Sagi Grimberg2eaa1c52015-10-13 19:11:28 +03002198 case IB_WR_REG_MR:
2199 status = ocrdma_build_reg(qp, hdr, reg_wr(wr));
2200 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302201 default:
2202 status = -EINVAL;
2203 break;
2204 }
2205 if (status) {
2206 *bad_wr = wr;
2207 break;
2208 }
Naresh Gottumukkala2b51a9b2013-08-26 15:27:43 +05302209 if (wr->send_flags & IB_SEND_SIGNALED || qp->signaled)
Parav Panditfe2caef2012-03-21 04:09:06 +05302210 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 1;
2211 else
2212 qp->wqe_wr_id_tbl[qp->sq.head].signaled = 0;
2213 qp->wqe_wr_id_tbl[qp->sq.head].wrid = wr->wr_id;
2214 ocrdma_cpu_to_le32(hdr, ((hdr->cw >> OCRDMA_WQE_SIZE_SHIFT) &
2215 OCRDMA_WQE_SIZE_MASK) * OCRDMA_WQE_STRIDE);
2216 /* make sure wqe is written before adapter can access it */
2217 wmb();
2218 /* inform hw to start processing it */
2219 ocrdma_ring_sq_db(qp);
2220
2221 /* update pointer, counter for next wr */
2222 ocrdma_hwq_inc_head(&qp->sq);
2223 wr = wr->next;
2224 }
2225 spin_unlock_irqrestore(&qp->q_lock, flags);
2226 return status;
2227}
2228
2229static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
2230{
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302231 u32 val = qp->rq.dbid | (1 << OCRDMA_DB_RQ_SHIFT);
Parav Panditfe2caef2012-03-21 04:09:06 +05302232
Devesh Sharma2df84fa82014-02-04 11:56:55 +05302233 iowrite32(val, qp->rq_db);
Parav Panditfe2caef2012-03-21 04:09:06 +05302234}
2235
Bart Van Assched34ac5c2018-07-18 09:25:32 -07002236static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe,
2237 const struct ib_recv_wr *wr, u16 tag)
Parav Panditfe2caef2012-03-21 04:09:06 +05302238{
2239 u32 wqe_size = 0;
2240 struct ocrdma_sge *sge;
2241 if (wr->num_sge)
2242 wqe_size = (wr->num_sge * sizeof(*sge)) + sizeof(*rqe);
2243 else
2244 wqe_size = sizeof(*sge) + sizeof(*rqe);
2245
2246 rqe->cw = ((wqe_size / OCRDMA_WQE_STRIDE) <<
2247 OCRDMA_WQE_SIZE_SHIFT);
2248 rqe->cw |= (OCRDMA_FLAG_SIG << OCRDMA_WQE_FLAGS_SHIFT);
2249 rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
2250 rqe->total_len = 0;
2251 rqe->rsvd_tag = tag;
2252 sge = (struct ocrdma_sge *)(rqe + 1);
2253 ocrdma_build_sges(rqe, sge, wr->num_sge, wr->sg_list);
2254 ocrdma_cpu_to_le32(rqe, wqe_size);
2255}
2256
Bart Van Assched34ac5c2018-07-18 09:25:32 -07002257int ocrdma_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
2258 const struct ib_recv_wr **bad_wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05302259{
2260 int status = 0;
2261 unsigned long flags;
2262 struct ocrdma_qp *qp = get_ocrdma_qp(ibqp);
2263 struct ocrdma_hdr_wqe *rqe;
2264
2265 spin_lock_irqsave(&qp->q_lock, flags);
2266 if (qp->state == OCRDMA_QPS_RST || qp->state == OCRDMA_QPS_ERR) {
2267 spin_unlock_irqrestore(&qp->q_lock, flags);
2268 *bad_wr = wr;
2269 return -EINVAL;
2270 }
2271 while (wr) {
2272 if (ocrdma_hwq_free_cnt(&qp->rq) == 0 ||
2273 wr->num_sge > qp->rq.max_sges) {
2274 *bad_wr = wr;
2275 status = -ENOMEM;
2276 break;
2277 }
2278 rqe = ocrdma_hwq_head(&qp->rq);
2279 ocrdma_build_rqe(rqe, wr, 0);
2280
2281 qp->rqe_wr_id_tbl[qp->rq.head] = wr->wr_id;
2282 /* make sure rqe is written before adapter can access it */
2283 wmb();
2284
2285 /* inform hw to start processing it */
2286 ocrdma_ring_rq_db(qp);
2287
2288 /* update pointer, counter for next wr */
2289 ocrdma_hwq_inc_head(&qp->rq);
2290 wr = wr->next;
2291 }
2292 spin_unlock_irqrestore(&qp->q_lock, flags);
2293 return status;
2294}
2295
2296/* cqes for an srq's rqes can potentially arrive out of order.
2297 * the index gives the entry in the shadow table where the
2298 * wr_id is stored. the tag/index is returned in the cqe to
2299 * reference back to a given rqe.
2300 */
2301static int ocrdma_srq_get_idx(struct ocrdma_srq *srq)
2302{
2303 int row = 0;
2304 int indx = 0;
2305
2306 for (row = 0; row < srq->bit_fields_len; row++) {
2307 if (srq->idx_bit_fields[row]) {
2308 indx = ffs(srq->idx_bit_fields[row]);
2309 indx = (row * 32) + (indx - 1);
ssh10db287ec2016-12-24 21:50:06 +05302310 BUG_ON(indx >= srq->rq.max_cnt);
Parav Panditfe2caef2012-03-21 04:09:06 +05302311 ocrdma_srq_toggle_bit(srq, indx);
2312 break;
2313 }
2314 }
2315
ssh10db287ec2016-12-24 21:50:06 +05302316 BUG_ON(row == srq->bit_fields_len);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302317 return indx + 1; /* Use from index 1 */
Parav Panditfe2caef2012-03-21 04:09:06 +05302318}
2319
2320static void ocrdma_ring_srq_db(struct ocrdma_srq *srq)
2321{
2322 u32 val = srq->rq.dbid | (1 << 16);
2323
2324 iowrite32(val, srq->db + OCRDMA_DB_GEN2_SRQ_OFFSET);
2325}
2326
Bart Van Assched34ac5c2018-07-18 09:25:32 -07002327int ocrdma_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
2328 const struct ib_recv_wr **bad_wr)
Parav Panditfe2caef2012-03-21 04:09:06 +05302329{
2330 int status = 0;
2331 unsigned long flags;
2332 struct ocrdma_srq *srq;
2333 struct ocrdma_hdr_wqe *rqe;
2334 u16 tag;
2335
2336 srq = get_ocrdma_srq(ibsrq);
2337
2338 spin_lock_irqsave(&srq->q_lock, flags);
2339 while (wr) {
2340 if (ocrdma_hwq_free_cnt(&srq->rq) == 0 ||
2341 wr->num_sge > srq->rq.max_sges) {
2342 status = -ENOMEM;
2343 *bad_wr = wr;
2344 break;
2345 }
2346 tag = ocrdma_srq_get_idx(srq);
2347 rqe = ocrdma_hwq_head(&srq->rq);
2348 ocrdma_build_rqe(rqe, wr, tag);
2349
2350 srq->rqe_wr_id_tbl[tag] = wr->wr_id;
2351 /* make sure rqe is written before adapter can perform DMA */
2352 wmb();
2353 /* inform hw to start processing it */
2354 ocrdma_ring_srq_db(srq);
2355 /* update pointer, counter for next wr */
2356 ocrdma_hwq_inc_head(&srq->rq);
2357 wr = wr->next;
2358 }
2359 spin_unlock_irqrestore(&srq->q_lock, flags);
2360 return status;
2361}
2362
2363static enum ib_wc_status ocrdma_to_ibwc_err(u16 status)
2364{
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302365 enum ib_wc_status ibwc_status;
Parav Panditfe2caef2012-03-21 04:09:06 +05302366
2367 switch (status) {
2368 case OCRDMA_CQE_GENERAL_ERR:
2369 ibwc_status = IB_WC_GENERAL_ERR;
2370 break;
2371 case OCRDMA_CQE_LOC_LEN_ERR:
2372 ibwc_status = IB_WC_LOC_LEN_ERR;
2373 break;
2374 case OCRDMA_CQE_LOC_QP_OP_ERR:
2375 ibwc_status = IB_WC_LOC_QP_OP_ERR;
2376 break;
2377 case OCRDMA_CQE_LOC_EEC_OP_ERR:
2378 ibwc_status = IB_WC_LOC_EEC_OP_ERR;
2379 break;
2380 case OCRDMA_CQE_LOC_PROT_ERR:
2381 ibwc_status = IB_WC_LOC_PROT_ERR;
2382 break;
2383 case OCRDMA_CQE_WR_FLUSH_ERR:
2384 ibwc_status = IB_WC_WR_FLUSH_ERR;
2385 break;
2386 case OCRDMA_CQE_MW_BIND_ERR:
2387 ibwc_status = IB_WC_MW_BIND_ERR;
2388 break;
2389 case OCRDMA_CQE_BAD_RESP_ERR:
2390 ibwc_status = IB_WC_BAD_RESP_ERR;
2391 break;
2392 case OCRDMA_CQE_LOC_ACCESS_ERR:
2393 ibwc_status = IB_WC_LOC_ACCESS_ERR;
2394 break;
2395 case OCRDMA_CQE_REM_INV_REQ_ERR:
2396 ibwc_status = IB_WC_REM_INV_REQ_ERR;
2397 break;
2398 case OCRDMA_CQE_REM_ACCESS_ERR:
2399 ibwc_status = IB_WC_REM_ACCESS_ERR;
2400 break;
2401 case OCRDMA_CQE_REM_OP_ERR:
2402 ibwc_status = IB_WC_REM_OP_ERR;
2403 break;
2404 case OCRDMA_CQE_RETRY_EXC_ERR:
2405 ibwc_status = IB_WC_RETRY_EXC_ERR;
2406 break;
2407 case OCRDMA_CQE_RNR_RETRY_EXC_ERR:
2408 ibwc_status = IB_WC_RNR_RETRY_EXC_ERR;
2409 break;
2410 case OCRDMA_CQE_LOC_RDD_VIOL_ERR:
2411 ibwc_status = IB_WC_LOC_RDD_VIOL_ERR;
2412 break;
2413 case OCRDMA_CQE_REM_INV_RD_REQ_ERR:
2414 ibwc_status = IB_WC_REM_INV_RD_REQ_ERR;
2415 break;
2416 case OCRDMA_CQE_REM_ABORT_ERR:
2417 ibwc_status = IB_WC_REM_ABORT_ERR;
2418 break;
2419 case OCRDMA_CQE_INV_EECN_ERR:
2420 ibwc_status = IB_WC_INV_EECN_ERR;
2421 break;
2422 case OCRDMA_CQE_INV_EEC_STATE_ERR:
2423 ibwc_status = IB_WC_INV_EEC_STATE_ERR;
2424 break;
2425 case OCRDMA_CQE_FATAL_ERR:
2426 ibwc_status = IB_WC_FATAL_ERR;
2427 break;
2428 case OCRDMA_CQE_RESP_TIMEOUT_ERR:
2429 ibwc_status = IB_WC_RESP_TIMEOUT_ERR;
2430 break;
2431 default:
2432 ibwc_status = IB_WC_GENERAL_ERR;
2433 break;
Joe Perches2b50176d2013-10-08 16:07:22 -07002434 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302435 return ibwc_status;
2436}
2437
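/* Fill an ib_wc for a send completion from the WQE at wqe_idx:
 * recover the wr_id from the shadow table and map the WQE opcode back
 * to an ib_wc opcode. byte_len is only filled for RDMA reads, where
 * it comes from the extended sge of the original WQE.
 */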
2438static void ocrdma_update_wc(struct ocrdma_qp *qp, struct ib_wc *ibwc,
2439 u32 wqe_idx)
2440{
2441 struct ocrdma_hdr_wqe *hdr;
2442 struct ocrdma_sge *rw;
2443 int opcode;
2444
2445 hdr = ocrdma_hwq_head_from_idx(&qp->sq, wqe_idx);
2446
2447 ibwc->wr_id = qp->wqe_wr_id_tbl[wqe_idx].wrid;
2448 /* Undo the hdr->cw swap */
2449 opcode = le32_to_cpu(hdr->cw) & OCRDMA_WQE_OPCODE_MASK;
2450 switch (opcode) {
2451 case OCRDMA_WRITE:
2452 ibwc->opcode = IB_WC_RDMA_WRITE;
2453 break;
2454 case OCRDMA_READ:
2455 rw = (struct ocrdma_sge *)(hdr + 1);
2456 ibwc->opcode = IB_WC_RDMA_READ;
2457 ibwc->byte_len = rw->len;
2458 break;
2459 case OCRDMA_SEND:
2460 ibwc->opcode = IB_WC_SEND;
2461 break;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302462 case OCRDMA_FR_MR:
Sagi Grimberg191cfed2015-10-13 19:11:44 +03002463 ibwc->opcode = IB_WC_REG_MR;
Naresh Gottumukkala7c338802013-08-26 15:27:39 +05302464 break;
Parav Panditfe2caef2012-03-21 04:09:06 +05302465 case OCRDMA_LKEY_INV:
2466 ibwc->opcode = IB_WC_LOCAL_INV;
2467 break;
2468 default:
2469 ibwc->status = IB_WC_GENERAL_ERR;
Naresh Gottumukkalaef99c4c2013-06-10 04:42:39 +00002470 pr_err("%s() invalid opcode received = 0x%x\n",
2471 __func__, hdr->cw & OCRDMA_WQE_OPCODE_MASK);
Parav Panditfe2caef2012-03-21 04:09:06 +05302472 break;
Joe Perches2b50176d2013-10-08 16:07:22 -07002473 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302474}
2475
2476static void ocrdma_set_cqe_status_flushed(struct ocrdma_qp *qp,
2477 struct ocrdma_cqe *cqe)
2478{
2479 if (is_cqe_for_sq(cqe)) {
2480 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2481 cqe->flags_status_srcqpn) &
2482 ~OCRDMA_CQE_STATUS_MASK);
2483 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2484 cqe->flags_status_srcqpn) |
2485 (OCRDMA_CQE_WR_FLUSH_ERR <<
2486 OCRDMA_CQE_STATUS_SHIFT));
2487 } else {
2488 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
2489 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2490 cqe->flags_status_srcqpn) &
2491 ~OCRDMA_CQE_UD_STATUS_MASK);
2492 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2493 cqe->flags_status_srcqpn) |
2494 (OCRDMA_CQE_WR_FLUSH_ERR <<
2495 OCRDMA_CQE_UD_STATUS_SHIFT));
2496 } else {
2497 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2498 cqe->flags_status_srcqpn) &
2499 ~OCRDMA_CQE_STATUS_MASK);
2500 cqe->flags_status_srcqpn = cpu_to_le32(le32_to_cpu(
2501 cqe->flags_status_srcqpn) |
2502 (OCRDMA_CQE_WR_FLUSH_ERR <<
2503 OCRDMA_CQE_STATUS_SHIFT));
2504 }
2505 }
2506}
2507
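/* Common handling for an error CQE: report the mapped error status,
 * flush the QP and move it to the error state. Returns true when
 * WQEs/RQEs are still outstanding, i.e. this CQE must be "expanded"
 * into further flush completions by the caller.
 */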
2508static bool ocrdma_update_err_cqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2509 struct ocrdma_qp *qp, int status)
2510{
2511 bool expand = false;
2512
2513 ibwc->byte_len = 0;
2514 ibwc->qp = &qp->ibqp;
2515 ibwc->status = ocrdma_to_ibwc_err(status);
2516
2517 ocrdma_flush_qp(qp);
Naresh Gottumukkala057729c2013-08-07 12:52:35 +05302518 ocrdma_qp_state_change(qp, IB_QPS_ERR, NULL);
Parav Panditfe2caef2012-03-21 04:09:06 +05302519
2520 /* if wqe/rqe pending for which cqe needs to be returned,
2521 * trigger inflating it.
2522 */
2523 if (!is_hw_rq_empty(qp) || !is_hw_sq_empty(qp)) {
2524 expand = true;
2525 ocrdma_set_cqe_status_flushed(qp, cqe);
2526 }
2527 return expand;
2528}
2529
2530static int ocrdma_update_err_rcqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2531 struct ocrdma_qp *qp, int status)
2532{
2533 ibwc->opcode = IB_WC_RECV;
2534 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2535 ocrdma_hwq_inc_tail(&qp->rq);
2536
2537 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2538}
2539
2540static int ocrdma_update_err_scqe(struct ib_wc *ibwc, struct ocrdma_cqe *cqe,
2541 struct ocrdma_qp *qp, int status)
2542{
2543 ocrdma_update_wc(qp, ibwc, qp->sq.tail);
2544 ocrdma_hwq_inc_tail(&qp->sq);
2545
2546 return ocrdma_update_err_cqe(ibwc, cqe, qp, status);
2547}
2548
2549
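/* Poll an error CQE on the send side. *polled reports whether a work
 * completion was produced; *stop tells the caller to quit polling
 * because this CQE is being kept to trigger the event on the RQ's
 * buddy CQ. The return value requests "expansion" of the CQE into
 * more flush completions.
 */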
2550static bool ocrdma_poll_err_scqe(struct ocrdma_qp *qp,
2551 struct ocrdma_cqe *cqe, struct ib_wc *ibwc,
2552 bool *polled, bool *stop)
2553{
2554 bool expand;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302555 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302556 int status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2557 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302558 if (status < OCRDMA_MAX_CQE_ERR)
2559 atomic_inc(&dev->cqe_err_stats[status]);
Parav Panditfe2caef2012-03-21 04:09:06 +05302560
2561	/* when the hw sq is empty but the rq is not, keep the cqe
2562	 * in order to get the cq event again.
2563	 */
2564 if (is_hw_sq_empty(qp) && !is_hw_rq_empty(qp)) {
2565		/* when the cq for the rq and sq is the same, it is safe
2566		 * to return flush cqes for the RQEs.
2567		 */
2568 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2569 *polled = true;
2570 status = OCRDMA_CQE_WR_FLUSH_ERR;
2571 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2572 } else {
2573			/* stop processing further cqes as this cqe is used for
2574			 * triggering the cq event on the buddy cq of the RQ.
2575			 * When the QP is destroyed, this cqe will be removed
2576			 * from the cq's hardware q.
2577			 */
2578 *polled = false;
2579 *stop = true;
2580 expand = false;
2581 }
Selvin Xaviera96ffb12014-06-10 19:32:19 +05302582 } else if (is_hw_sq_empty(qp)) {
2583 /* Do nothing */
2584 expand = false;
2585 *polled = false;
2586 *stop = false;
Parav Panditfe2caef2012-03-21 04:09:06 +05302587 } else {
2588 *polled = true;
2589 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2590 }
2591 return expand;
2592}
2593
2594static bool ocrdma_poll_success_scqe(struct ocrdma_qp *qp,
2595 struct ocrdma_cqe *cqe,
2596 struct ib_wc *ibwc, bool *polled)
2597{
2598 bool expand = false;
2599 int tail = qp->sq.tail;
2600 u32 wqe_idx;
2601
2602 if (!qp->wqe_wr_id_tbl[tail].signaled) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302603 *polled = false; /* WC cannot be consumed yet */
2604 } else {
2605 ibwc->status = IB_WC_SUCCESS;
2606 ibwc->wc_flags = 0;
2607 ibwc->qp = &qp->ibqp;
2608 ocrdma_update_wc(qp, ibwc, tail);
2609 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302610 }
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302611 wqe_idx = (le32_to_cpu(cqe->wq.wqeidx) &
2612 OCRDMA_CQE_WQEIDX_MASK) & qp->sq.max_wqe_idx;
Parav Panditae3bca92012-08-17 14:45:33 +00002613 if (tail != wqe_idx)
2614 expand = true; /* Coalesced CQE can't be consumed yet */
2615
Parav Panditfe2caef2012-03-21 04:09:06 +05302616 ocrdma_hwq_inc_tail(&qp->sq);
2617 return expand;
2618}
2619
2620static bool ocrdma_poll_scqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2621 struct ib_wc *ibwc, bool *polled, bool *stop)
2622{
2623 int status;
2624 bool expand;
2625
2626 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2627 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
2628
2629 if (status == OCRDMA_CQE_SUCCESS)
2630 expand = ocrdma_poll_success_scqe(qp, cqe, ibwc, polled);
2631 else
2632 expand = ocrdma_poll_err_scqe(qp, cqe, ibwc, polled, stop);
2633 return expand;
2634}
2635
Devesh Sharma6b062662016-01-28 08:59:57 -05002636static int ocrdma_update_ud_rcqe(struct ocrdma_dev *dev, struct ib_wc *ibwc,
2637 struct ocrdma_cqe *cqe)
Parav Panditfe2caef2012-03-21 04:09:06 +05302638{
2639 int status;
Devesh Sharma6b062662016-01-28 08:59:57 -05002640 u16 hdr_type = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302641
2642 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2643 OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
2644 ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
2645 OCRDMA_CQE_SRCQP_MASK;
Selvin Xavieraff3ead2016-02-05 20:06:39 +05302646 ibwc->pkey_index = 0;
Parav Panditfe2caef2012-03-21 04:09:06 +05302647 ibwc->wc_flags = IB_WC_GRH;
2648 ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
Devesh Sharma6b062662016-01-28 08:59:57 -05002649 OCRDMA_CQE_UD_XFER_LEN_SHIFT) &
2650 OCRDMA_CQE_UD_XFER_LEN_MASK;
2651
2652 if (ocrdma_is_udp_encap_supported(dev)) {
2653 hdr_type = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
2654 OCRDMA_CQE_UD_L3TYPE_SHIFT) &
2655 OCRDMA_CQE_UD_L3TYPE_MASK;
2656 ibwc->wc_flags |= IB_WC_WITH_NETWORK_HDR_TYPE;
2657 ibwc->network_hdr_type = hdr_type;
2658 }
2659
Parav Panditfe2caef2012-03-21 04:09:06 +05302660 return status;
2661}
2662
2663static void ocrdma_update_free_srq_cqe(struct ib_wc *ibwc,
2664 struct ocrdma_cqe *cqe,
2665 struct ocrdma_qp *qp)
2666{
2667 unsigned long flags;
2668 struct ocrdma_srq *srq;
2669 u32 wqe_idx;
2670
2671 srq = get_ocrdma_srq(qp->ibqp.srq);
Naresh Gottumukkala43a6b402013-08-26 15:27:38 +05302672 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >>
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302673 OCRDMA_CQE_BUFTAG_SHIFT) & srq->rq.max_wqe_idx;
ssh10db287ec2016-12-24 21:50:06 +05302674 BUG_ON(wqe_idx < 1);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302675
Parav Panditfe2caef2012-03-21 04:09:06 +05302676 ibwc->wr_id = srq->rqe_wr_id_tbl[wqe_idx];
2677 spin_lock_irqsave(&srq->q_lock, flags);
Selvin Xaviercf5788a2014-02-04 11:57:03 +05302678 ocrdma_srq_toggle_bit(srq, wqe_idx - 1);
Parav Panditfe2caef2012-03-21 04:09:06 +05302679 spin_unlock_irqrestore(&srq->q_lock, flags);
2680 ocrdma_hwq_inc_tail(&srq->rq);
2681}
2682
2683static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2684 struct ib_wc *ibwc, bool *polled, bool *stop,
2685 int status)
2686{
2687 bool expand;
Selvin Xavierad56ebb2014-12-18 14:12:59 +05302688 struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
2689
2690 if (status < OCRDMA_MAX_CQE_ERR)
2691 atomic_inc(&dev->cqe_err_stats[status]);
Parav Panditfe2caef2012-03-21 04:09:06 +05302692
2693	/* when the hw rq is empty but the sq is not, keep the cqe
2694	 * to get the cq event again.
2695	 */
2696 if (is_hw_rq_empty(qp) && !is_hw_sq_empty(qp)) {
2697 if (!qp->srq && (qp->sq_cq == qp->rq_cq)) {
2698 *polled = true;
2699 status = OCRDMA_CQE_WR_FLUSH_ERR;
2700 expand = ocrdma_update_err_scqe(ibwc, cqe, qp, status);
2701 } else {
2702 *polled = false;
2703 *stop = true;
2704 expand = false;
2705 }
Selvin Xaviera96ffb12014-06-10 19:32:19 +05302706 } else if (is_hw_rq_empty(qp)) {
2707 /* Do nothing */
2708 expand = false;
2709 *polled = false;
2710 *stop = false;
Parav Pandita3698a92012-06-11 16:39:20 +05302711 } else {
2712 *polled = true;
Parav Panditfe2caef2012-03-21 04:09:06 +05302713 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
Parav Pandita3698a92012-06-11 16:39:20 +05302714 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302715 return expand;
2716}
2717
2718static void ocrdma_poll_success_rcqe(struct ocrdma_qp *qp,
2719 struct ocrdma_cqe *cqe, struct ib_wc *ibwc)
2720{
Devesh Sharma6b062662016-01-28 08:59:57 -05002721 struct ocrdma_dev *dev;
2722
2723 dev = get_ocrdma_dev(qp->ibqp.device);
Parav Panditfe2caef2012-03-21 04:09:06 +05302724 ibwc->opcode = IB_WC_RECV;
2725 ibwc->qp = &qp->ibqp;
2726 ibwc->status = IB_WC_SUCCESS;
2727
2728 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI)
Devesh Sharma6b062662016-01-28 08:59:57 -05002729 ocrdma_update_ud_rcqe(dev, ibwc, cqe);
Parav Panditfe2caef2012-03-21 04:09:06 +05302730 else
2731 ibwc->byte_len = le32_to_cpu(cqe->rq.rxlen);
2732
2733 if (is_cqe_imm(cqe)) {
2734 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2735 ibwc->wc_flags |= IB_WC_WITH_IMM;
2736 } else if (is_cqe_wr_imm(cqe)) {
2737 ibwc->opcode = IB_WC_RECV_RDMA_WITH_IMM;
2738 ibwc->ex.imm_data = htonl(le32_to_cpu(cqe->rq.lkey_immdt));
2739 ibwc->wc_flags |= IB_WC_WITH_IMM;
2740 } else if (is_cqe_invalidated(cqe)) {
2741 ibwc->ex.invalidate_rkey = le32_to_cpu(cqe->rq.lkey_immdt);
2742 ibwc->wc_flags |= IB_WC_WITH_INVALIDATE;
2743 }
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302744 if (qp->ibqp.srq) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302745 ocrdma_update_free_srq_cqe(ibwc, cqe, qp);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302746 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302747 ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
2748 ocrdma_hwq_inc_tail(&qp->rq);
2749 }
2750}
2751
2752static bool ocrdma_poll_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2753 struct ib_wc *ibwc, bool *polled, bool *stop)
2754{
2755 int status;
2756 bool expand = false;
2757
2758 ibwc->wc_flags = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302759 if (qp->qp_type == IB_QPT_UD || qp->qp_type == IB_QPT_GSI) {
Parav Panditfe2caef2012-03-21 04:09:06 +05302760 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2761 OCRDMA_CQE_UD_STATUS_MASK) >>
2762 OCRDMA_CQE_UD_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302763 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302764 status = (le32_to_cpu(cqe->flags_status_srcqpn) &
2765 OCRDMA_CQE_STATUS_MASK) >> OCRDMA_CQE_STATUS_SHIFT;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302766 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302767
2768 if (status == OCRDMA_CQE_SUCCESS) {
2769 *polled = true;
2770 ocrdma_poll_success_rcqe(qp, cqe, ibwc);
2771 } else {
2772 expand = ocrdma_poll_err_rcqe(qp, cqe, ibwc, polled, stop,
2773 status);
2774 }
2775 return expand;
2776}
2777
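/* CQE ownership tracking: with phase_change enabled the expected
 * polarity of the valid bit flips every time the ring wraps;
 * otherwise each consumed CQE has its valid bit cleared explicitly.
 */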
2778static void ocrdma_change_cq_phase(struct ocrdma_cq *cq, struct ocrdma_cqe *cqe,
2779 u16 cur_getp)
2780{
2781 if (cq->phase_change) {
2782 if (cur_getp == 0)
2783 cq->phase = (~cq->phase & OCRDMA_CQE_VALID);
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302784 } else {
Parav Panditfe2caef2012-03-21 04:09:06 +05302785 /* clear valid bit */
2786 cqe->flags_status_srcqpn = 0;
Naresh Gottumukkalaf99b1642013-08-07 12:52:32 +05302787 }
Parav Panditfe2caef2012-03-21 04:09:06 +05302788}
2789
static int ocrdma_poll_hwcq(struct ocrdma_cq *cq, int num_entries,
			    struct ib_wc *ibwc)
{
	u16 qpn = 0;
	int i = 0;
	bool expand = false;
	int polled_hw_cqes = 0;
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_dev *dev = get_ocrdma_dev(cq->ibcq.device);
	struct ocrdma_cqe *cqe;
	u16 cur_getp;
	bool polled = false;
	bool stop = false;

	cur_getp = cq->getp;
	while (num_entries) {
		cqe = cq->va + cur_getp;
		/* check whether the CQE is valid */
		if (!is_cqe_valid(cq, cqe))
			break;
		qpn = (le32_to_cpu(cqe->cmn.qpn) & OCRDMA_CQE_QPN_MASK);
		/* ignore discarded cqe */
		if (qpn == 0)
			goto skip_cqe;
		qp = dev->qp_tbl[qpn];
		BUG_ON(qp == NULL);

		if (is_cqe_for_sq(cqe)) {
			expand = ocrdma_poll_scqe(qp, cqe, ibwc, &polled,
						  &stop);
		} else {
			expand = ocrdma_poll_rcqe(qp, cqe, ibwc, &polled,
						  &stop);
		}
		if (expand)
			goto expand_cqe;
		if (stop)
			goto stop_cqe;
		/* clear qpn to avoid duplicate processing by discard_cqe() */
		cqe->cmn.qpn = 0;
skip_cqe:
		polled_hw_cqes += 1;
		cur_getp = (cur_getp + 1) % cq->max_hw_cqe;
		ocrdma_change_cq_phase(cq, cqe, cur_getp);
expand_cqe:
		if (polled) {
			num_entries -= 1;
			i += 1;
			ibwc = ibwc + 1;
			polled = false;
		}
	}
stop_cqe:
	cq->getp = cur_getp;

	if (polled_hw_cqes)
		ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);

	return i;
}

/* Insert error CQEs if the QP's SQ or RQ CQ matches the CQ under poll. */
static int ocrdma_add_err_cqe(struct ocrdma_cq *cq, int num_entries,
			      struct ocrdma_qp *qp, struct ib_wc *ibwc)
{
	int err_cqes = 0;

	while (num_entries) {
		if (is_hw_sq_empty(qp) && is_hw_rq_empty(qp))
			break;
		if (!is_hw_sq_empty(qp) && qp->sq_cq == cq) {
			ocrdma_update_wc(qp, ibwc, qp->sq.tail);
			ocrdma_hwq_inc_tail(&qp->sq);
		} else if (!is_hw_rq_empty(qp) && qp->rq_cq == cq) {
			ibwc->wr_id = qp->rqe_wr_id_tbl[qp->rq.tail];
			ocrdma_hwq_inc_tail(&qp->rq);
		} else {
			return err_cqes;
		}
		ibwc->byte_len = 0;
		ibwc->status = IB_WC_WR_FLUSH_ERR;
		ibwc = ibwc + 1;
		err_cqes += 1;
		num_entries -= 1;
	}
	return err_cqes;
}

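/* poll_cq verb.  Completions are first harvested from the adapter CQ
 * under cq_lock; if the caller still has room, software flush
 * completions are synthesized for the QPs queued on this device's
 * flush list.  A typical caller on the ib_core side (illustrative
 * only; "budget" and "wc_array" are assumed names):
 *
 *	n = ib_poll_cq(ibcq, budget, wc_array);
 */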
int ocrdma_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc)
{
	int cqes_to_poll = num_entries;
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	int num_os_cqe = 0, err_cqes = 0;
	struct ocrdma_qp *qp;
	unsigned long flags;

	/* poll cqes from adapter CQ */
	spin_lock_irqsave(&cq->cq_lock, flags);
	num_os_cqe = ocrdma_poll_hwcq(cq, cqes_to_poll, wc);
	spin_unlock_irqrestore(&cq->cq_lock, flags);
	cqes_to_poll -= num_os_cqe;

	if (cqes_to_poll) {
		wc = wc + num_os_cqe;
		/* The adapter returns a single error CQE when a QP moves to
		 * the error state, so synthesize CQEs with wc_status set to
		 * IB_WC_WR_FLUSH_ERR for the pending WQEs and RQEs of every
		 * QP whose SQ or RQ uses this CQ.
		 */
		spin_lock_irqsave(&dev->flush_q_lock, flags);
		list_for_each_entry(qp, &cq->sq_head, sq_entry) {
			if (cqes_to_poll == 0)
				break;
			err_cqes = ocrdma_add_err_cqe(cq, cqes_to_poll, qp, wc);
			cqes_to_poll -= err_cqes;
			num_os_cqe += err_cqes;
			wc = wc + err_cqes;
		}
		spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	}
	return num_os_cqe;
}

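/* req_notify_cq verb: ring the CQ doorbell with the arm bit set so the
 * adapter raises a completion event on the next CQE, or only on the
 * next solicited CQE when IB_CQ_SOLICITED is requested.
 */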
int ocrdma_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags cq_flags)
{
	struct ocrdma_cq *cq = get_ocrdma_cq(ibcq);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibcq->device);
	u16 cq_id;
	unsigned long flags;
	bool arm_needed = false, sol_needed = false;

	cq_id = cq->id;

	spin_lock_irqsave(&cq->cq_lock, flags);
	if (cq_flags & IB_CQ_NEXT_COMP || cq_flags & IB_CQ_SOLICITED)
		arm_needed = true;
	if (cq_flags & IB_CQ_SOLICITED)
		sol_needed = true;

	ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
	spin_unlock_irqrestore(&cq->cq_lock, flags);

	return 0;
}

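/* Allocate a fast-registration MR with room for max_num_sg PBL
 * entries.  Only IB_MR_TYPE_MEM_REG is supported, and max_num_sg is
 * bounded by the adapter's max_pages_per_frmr attribute.
 */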
struct ib_mr *ocrdma_alloc_mr(struct ib_pd *ibpd, enum ib_mr_type mr_type,
			      u32 max_num_sg, struct ib_udata *udata)
{
	int status;
	struct ocrdma_mr *mr;
	struct ocrdma_pd *pd = get_ocrdma_pd(ibpd);
	struct ocrdma_dev *dev = get_ocrdma_dev(ibpd->device);

	if (mr_type != IB_MR_TYPE_MEM_REG)
		return ERR_PTR(-EINVAL);

	if (max_num_sg > dev->attr.max_pages_per_frmr)
		return ERR_PTR(-EINVAL);

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mr->pages = kcalloc(max_num_sg, sizeof(u64), GFP_KERNEL);
	if (!mr->pages) {
		status = -ENOMEM;
		goto pl_err;
	}

	status = ocrdma_get_pbl_info(dev, mr, max_num_sg);
	if (status)
		goto pbl_err;
	mr->hwmr.fr_mr = 1;
	mr->hwmr.remote_rd = 0;
	mr->hwmr.remote_wr = 0;
	mr->hwmr.local_rd = 0;
	mr->hwmr.local_wr = 0;
	mr->hwmr.mw_bind = 0;
	status = ocrdma_build_pbl_tbl(dev, &mr->hwmr);
	if (status)
		goto pbl_err;
	status = ocrdma_reg_mr(dev, &mr->hwmr, pd->id, 0);
	if (status)
		goto mbx_err;
	mr->ibmr.rkey = mr->hwmr.lkey;
	mr->ibmr.lkey = mr->hwmr.lkey;
	dev->stag_arr[(mr->hwmr.lkey >> 8) & (OCRDMA_MAX_STAG - 1)] =
		(unsigned long) mr;
	return &mr->ibmr;
mbx_err:
	ocrdma_free_mr_pbl_tbl(dev, &mr->hwmr);
pbl_err:
	kfree(mr->pages);
pl_err:
	kfree(mr);
	return ERR_PTR(-ENOMEM);
}

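/* ib_sg_to_pages() callback: record one page address in the MR's page
 * list, failing once the number of PBEs reserved at allocation time is
 * exhausted.
 */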
static int ocrdma_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	if (unlikely(mr->npages == mr->hwmr.num_pbes))
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;

	return 0;
}

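/* map_mr_sg verb: reset the page count and let the core helper
 * ib_sg_to_pages() walk the SG list, handing each device-page address
 * to ocrdma_set_page().
 */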
int ocrdma_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		     unsigned int *sg_offset)
{
	struct ocrdma_mr *mr = get_ocrdma_mr(ibmr);

	mr->npages = 0;

	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, ocrdma_set_page);
}
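
/* Usage sketch (illustrative only, not part of this driver): a ULP
 * allocates a fast-registration MR and maps an SG list to it before
 * posting an IB_WR_REG_MR work request.  Error handling is omitted
 * and the ULP-side names (pd, sgl, nents) are assumptions:
 *
 *	struct ib_mr *mr;
 *	int n;
 *
 *	mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 16);
 *	n = ib_map_mr_sg(mr, sgl, nents, NULL, PAGE_SIZE);
 *
 * ib_map_mr_sg() reaches ocrdma_map_mr_sg() above through the
 * map_mr_sg verb; the MR is later released with ib_dereg_mr().
 */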