/*
 * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <rdma/uverbs_ioctl.h>

#include "iw_cxgb4.h"

static int db_delay_usecs = 1;
module_param(db_delay_usecs, int, 0644);
MODULE_PARM_DESC(db_delay_usecs, "Usecs to delay awaiting db fifo to drain");

static int ocqp_support = 1;
module_param(ocqp_support, int, 0644);
MODULE_PARM_DESC(ocqp_support, "Support on-chip SQs (default=1)");

int db_fc_threshold = 1000;
module_param(db_fc_threshold, int, 0644);
MODULE_PARM_DESC(db_fc_threshold,
		 "QP count/threshold that triggers"
		 " automatic db flow control mode (default = 1000)");

int db_coalescing_threshold;
module_param(db_coalescing_threshold, int, 0644);
MODULE_PARM_DESC(db_coalescing_threshold,
		 "QP count/threshold that triggers"
		 " disabling db coalescing (default = 0)");

static int max_fr_immd = T4_MAX_FR_IMMD;
module_param(max_fr_immd, int, 0644);
MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");

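/*
 * Reserve "ird" worth of the device's IRD (incoming RDMA read) budget
 * for a QP, failing with -ENOMEM when the budget is exhausted;
 * free_ird() below returns the reservation.
 */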
static int alloc_ird(struct c4iw_dev *dev, u32 ird)
{
	int ret = 0;

	xa_lock_irq(&dev->qps);
	if (ird <= dev->avail_ird)
		dev->avail_ird -= ird;
	else
		ret = -ENOMEM;
	xa_unlock_irq(&dev->qps);

	if (ret)
		dev_warn(&dev->rdev.lldi.pdev->dev,
			 "device IRD resources exhausted\n");

	return ret;
}

static void free_ird(struct c4iw_dev *dev, int ird)
{
	xa_lock_irq(&dev->qps);
	dev->avail_ird += ird;
	xa_unlock_irq(&dev->qps);
}

static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
{
	unsigned long flag;

	spin_lock_irqsave(&qhp->lock, flag);
	qhp->attr.state = state;
	spin_unlock_irqrestore(&qhp->lock, flag);
}

static void dealloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	c4iw_ocqp_pool_free(rdev, sq->dma_addr, sq->memsize);
}

static void dealloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,
			  dma_unmap_addr(sq, mapping));
}

static void dealloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (t4_sq_onchip(sq))
		dealloc_oc_sq(rdev, sq);
	else
		dealloc_host_sq(rdev, sq);
}

static int alloc_oc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	if (!ocqp_support || !ocqp_supported(&rdev->lldi))
		return -ENOSYS;
	sq->dma_addr = c4iw_ocqp_pool_alloc(rdev, sq->memsize);
	if (!sq->dma_addr)
		return -ENOMEM;
	sq->phys_addr = rdev->oc_mw_pa + sq->dma_addr -
			rdev->lldi.vr->ocq.start;
	sq->queue = (__force union t4_wr *)(rdev->oc_mw_kva + sq->dma_addr -
					    rdev->lldi.vr->ocq.start);
	sq->flags |= T4_SQ_ONCHIP;
	return 0;
}

static int alloc_host_sq(struct c4iw_rdev *rdev, struct t4_sq *sq)
{
	sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,
				       &(sq->dma_addr), GFP_KERNEL);
	if (!sq->queue)
		return -ENOMEM;
	sq->phys_addr = virt_to_phys(sq->queue);
	dma_unmap_addr_set(sq, mapping, sq->dma_addr);
	return 0;
}

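/*
 * For user QPs, first try to place the SQ in on-chip queue memory and
 * fall back to host DMA memory if that fails; kernel QPs always use
 * host memory.
 */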
static int alloc_sq(struct c4iw_rdev *rdev, struct t4_sq *sq, int user)
{
	int ret = -ENOSYS;

	if (user)
		ret = alloc_oc_sq(rdev, sq);
	if (ret)
		ret = alloc_host_sq(rdev, sq);
	return ret;
}

static int destroy_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		      struct c4iw_dev_ucontext *uctx, int has_rq)
{
	/*
	 * uP clears EQ contexts when the connection exits rdma mode,
	 * so no need to post a RESET WR for these EQs.
	 */
	dealloc_sq(rdev, &wq->sq);
	kfree(wq->sq.sw_sq);
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);

	if (has_rq) {
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
		kfree(wq->rq.sw_rq);
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
	}
	return 0;
}

/*
 * Determine the BAR2 virtual address and qid. If pbar2_pa is not NULL,
 * then this is a user mapping so compute the page-aligned physical address
 * for mapping.
 */
void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
			      enum cxgb4_bar2_qtype qtype,
			      unsigned int *pbar2_qid, u64 *pbar2_pa)
{
	u64 bar2_qoffset;
	int ret;

	ret = cxgb4_bar2_sge_qregs(rdev->lldi.ports[0], qid, qtype,
				   pbar2_pa ? 1 : 0,
				   &bar2_qoffset, pbar2_qid);
	if (ret)
		return NULL;

	if (pbar2_pa)
		*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;

	if (is_t4(rdev->lldi.adapter_type))
		return NULL;

	return rdev->bar2_kva + bar2_qoffset;
}

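/*
 * Allocate everything a T4 work queue pair needs - qids, software
 * queues, the RQT and DMA queue memory - then hand the queues to the
 * firmware with a FW_RI_RES_WR and wait for its completion.
 */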
static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
		     struct t4_cq *rcq, struct t4_cq *scq,
		     struct c4iw_dev_ucontext *uctx,
		     struct c4iw_wr_wait *wr_waitp,
		     int need_rq)
{
	int user = (uctx != &rdev->uctx);
	struct fw_ri_res_wr *res_wr;
	struct fw_ri_res *res;
	int wr_len;
	struct sk_buff *skb;
	int ret = 0;
	int eqsize;

	wq->sq.qid = c4iw_get_qpid(rdev, uctx);
	if (!wq->sq.qid)
		return -ENOMEM;

	if (need_rq) {
		wq->rq.qid = c4iw_get_qpid(rdev, uctx);
		if (!wq->rq.qid) {
			ret = -ENOMEM;
			goto free_sq_qid;
		}
	}

	if (!user) {
		wq->sq.sw_sq = kcalloc(wq->sq.size, sizeof(*wq->sq.sw_sq),
				       GFP_KERNEL);
		if (!wq->sq.sw_sq) {
			ret = -ENOMEM;
			goto free_rq_qid; /* FIXME */
		}

		if (need_rq) {
			wq->rq.sw_rq = kcalloc(wq->rq.size,
					       sizeof(*wq->rq.sw_rq),
					       GFP_KERNEL);
			if (!wq->rq.sw_rq) {
				ret = -ENOMEM;
				goto free_sw_sq;
			}
		}
	}

	if (need_rq) {
		/*
		 * RQT must be a power of 2 and at least 16 deep.
		 */
		wq->rq.rqt_size =
			roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
		wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
		if (!wq->rq.rqt_hwaddr) {
			ret = -ENOMEM;
			goto free_sw_rq;
		}
	}

	ret = alloc_sq(rdev, &wq->sq, user);
	if (ret)
		goto free_hwaddr;
	memset(wq->sq.queue, 0, wq->sq.memsize);
	dma_unmap_addr_set(&wq->sq, mapping, wq->sq.dma_addr);

	if (need_rq) {
		wq->rq.queue = dma_alloc_coherent(&rdev->lldi.pdev->dev,
						  wq->rq.memsize,
						  &wq->rq.dma_addr,
						  GFP_KERNEL);
		if (!wq->rq.queue) {
			ret = -ENOMEM;
			goto free_sq;
		}
		pr_debug("sq base va 0x%p pa 0x%llx rq base va 0x%p pa 0x%llx\n",
			 wq->sq.queue,
			 (unsigned long long)virt_to_phys(wq->sq.queue),
			 wq->rq.queue,
			 (unsigned long long)virt_to_phys(wq->rq.queue));
		dma_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
	}

	wq->db = rdev->lldi.db_reg;

	wq->sq.bar2_va = c4iw_bar2_addrs(rdev, wq->sq.qid,
					 CXGB4_BAR2_QTYPE_EGRESS,
					 &wq->sq.bar2_qid,
					 user ? &wq->sq.bar2_pa : NULL);
	if (need_rq)
		wq->rq.bar2_va = c4iw_bar2_addrs(rdev, wq->rq.qid,
						 CXGB4_BAR2_QTYPE_EGRESS,
						 &wq->rq.bar2_qid,
						 user ? &wq->rq.bar2_pa : NULL);

	/*
	 * User mode must have bar2 access.
	 */
	if (user && (!wq->sq.bar2_pa || (need_rq && !wq->rq.bar2_pa))) {
		pr_warn("%s: sqid %u or rqid %u not in BAR2 range\n",
			pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
		ret = -EINVAL;
		goto free_dma;
	}

	wq->rdev = rdev;
	wq->rq.msn = 1;

	/* build fw_ri_res_wr */
	wr_len = sizeof(*res_wr) + 2 * sizeof(*res);
	if (need_rq)
		wr_len += sizeof(*res);
	skb = alloc_skb(wr_len, GFP_KERNEL);
	if (!skb) {
		ret = -ENOMEM;
		goto free_dma;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);

	res_wr = __skb_put_zero(skb, wr_len);
	res_wr->op_nres = cpu_to_be32(
			FW_WR_OP_V(FW_RI_RES_WR) |
			FW_RI_RES_WR_NRES_V(need_rq ? 2 : 1) |
			FW_WR_COMPL_F);
	res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
	res_wr->cookie = (uintptr_t)wr_waitp;
	res = res_wr->res;
	res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
	res->u.sqrq.op = FW_RI_RES_OP_WRITE;

	/*
	 * eqsize is the number of 64B entries plus the status page size.
	 */
	eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
		 rdev->hw_queue.t4_eq_status_entries;

	res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
		FW_RI_RES_WR_HOSTFCMODE_V(0) |	/* no host cidx updates */
		FW_RI_RES_WR_CPRIO_V(0) |	/* don't keep in chip cache */
		FW_RI_RES_WR_PCIECHN_V(0) |	/* set by uP at ri_init time */
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_ONCHIP_F : 0) |
		FW_RI_RES_WR_IQID_V(scq->cqid));
	res->u.sqrq.dcaen_to_eqsize = cpu_to_be32(
		FW_RI_RES_WR_DCAEN_V(0) |
		FW_RI_RES_WR_DCACPU_V(0) |
		FW_RI_RES_WR_FBMIN_V(2) |
		(t4_sq_onchip(&wq->sq) ? FW_RI_RES_WR_FBMAX_V(2) :
					 FW_RI_RES_WR_FBMAX_V(3)) |
		FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
		FW_RI_RES_WR_CIDXFTHRESH_V(0) |
		FW_RI_RES_WR_EQSIZE_V(eqsize));
	res->u.sqrq.eqid = cpu_to_be32(wq->sq.qid);
	res->u.sqrq.eqaddr = cpu_to_be64(wq->sq.dma_addr);

	if (need_rq) {
		res++;
		res->u.sqrq.restype = FW_RI_RES_TYPE_RQ;
		res->u.sqrq.op = FW_RI_RES_OP_WRITE;

		/*
		 * eqsize is the number of 64B entries plus the status page size
		 */
		eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
			 rdev->hw_queue.t4_eq_status_entries;
		res->u.sqrq.fetchszm_to_iqid =
			/* no host cidx updates */
			cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
			/* don't keep in chip cache */
			FW_RI_RES_WR_CPRIO_V(0) |
			/* set by uP at ri_init time */
			FW_RI_RES_WR_PCIECHN_V(0) |
			FW_RI_RES_WR_IQID_V(rcq->cqid));
		res->u.sqrq.dcaen_to_eqsize =
			cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
			FW_RI_RES_WR_DCACPU_V(0) |
			FW_RI_RES_WR_FBMIN_V(2) |
			FW_RI_RES_WR_FBMAX_V(3) |
			FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
			FW_RI_RES_WR_CIDXFTHRESH_V(0) |
			FW_RI_RES_WR_EQSIZE_V(eqsize));
		res->u.sqrq.eqid = cpu_to_be32(wq->rq.qid);
		res->u.sqrq.eqaddr = cpu_to_be64(wq->rq.dma_addr);
	}

	c4iw_init_wr_wait(wr_waitp);
	ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->sq.qid, __func__);
	if (ret)
		goto free_dma;

	pr_debug("sqid 0x%x rqid 0x%x kdb 0x%p sq_bar2_addr %p rq_bar2_addr %p\n",
		 wq->sq.qid, wq->rq.qid, wq->db,
		 wq->sq.bar2_va, wq->rq.bar2_va);

	return 0;
free_dma:
	if (need_rq)
		dma_free_coherent(&rdev->lldi.pdev->dev,
				  wq->rq.memsize, wq->rq.queue,
				  dma_unmap_addr(&wq->rq, mapping));
free_sq:
	dealloc_sq(rdev, &wq->sq);
free_hwaddr:
	if (need_rq)
		c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size);
free_sw_rq:
	if (need_rq)
		kfree(wq->rq.sw_rq);
free_sw_sq:
	kfree(wq->sq.sw_sq);
free_rq_qid:
	if (need_rq)
		c4iw_put_qpid(rdev, wq->rq.qid, uctx);
free_sq_qid:
	c4iw_put_qpid(rdev, wq->sq.qid, uctx);
	return ret;
}

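/*
 * Copy the SGE payloads into the WQE as immediate data, wrapping back
 * to the start of the SQ when the end of queue memory is reached and
 * zero-padding the result out to a 16B boundary.
 */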
static int build_immd(struct t4_sq *sq, struct fw_ri_immd *immdp,
		      const struct ib_send_wr *wr, int max, u32 *plenp)
{
	u8 *dstp, *srcp;
	u32 plen = 0;
	int i;
	int rem, len;

	dstp = (u8 *)immdp->data;
	for (i = 0; i < wr->num_sge; i++) {
		if ((plen + wr->sg_list[i].length) > max)
			return -EMSGSIZE;
		srcp = (u8 *)(unsigned long)wr->sg_list[i].addr;
		plen += wr->sg_list[i].length;
		rem = wr->sg_list[i].length;
		while (rem) {
			if (dstp == (u8 *)&sq->queue[sq->size])
				dstp = (u8 *)sq->queue;
			if (rem <= (u8 *)&sq->queue[sq->size] - dstp)
				len = rem;
			else
				len = (u8 *)&sq->queue[sq->size] - dstp;
			memcpy(dstp, srcp, len);
			dstp += len;
			srcp += len;
			rem -= len;
		}
	}
	len = roundup(plen + sizeof(*immdp), 16) - (plen + sizeof(*immdp));
	if (len)
		memset(dstp, 0, len);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->r1 = 0;
	immdp->r2 = 0;
	immdp->immdlen = cpu_to_be32(plen);
	*plenp = plen;
	return 0;
}

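/*
 * Build an immediate SGL (isgl) in the WQE from the supplied SGEs,
 * again wrapping flits that run past the end of the queue memory.
 */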
static int build_isgl(__be64 *queue_start, __be64 *queue_end,
		      struct fw_ri_isgl *isglp, struct ib_sge *sg_list,
		      int num_sge, u32 *plenp)
{
	int i;
	u32 plen = 0;
	__be64 *flitp;

	if ((__be64 *)isglp == queue_end)
		isglp = (struct fw_ri_isgl *)queue_start;

	flitp = (__be64 *)isglp->sge;

	for (i = 0; i < num_sge; i++) {
		if ((plen + sg_list[i].length) < plen)
			return -EMSGSIZE;
		plen += sg_list[i].length;
		*flitp = cpu_to_be64(((u64)sg_list[i].lkey << 32) |
				     sg_list[i].length);
		if (++flitp == queue_end)
			flitp = queue_start;
		*flitp = cpu_to_be64(sg_list[i].addr);
		if (++flitp == queue_end)
			flitp = queue_start;
	}
	*flitp = (__force __be64)0;
	isglp->op = FW_RI_DATA_ISGL;
	isglp->r1 = 0;
	isglp->nsge = cpu_to_be16(num_sge);
	isglp->r2 = 0;
	if (plenp)
		*plenp = plen;
	return 0;
}

static int build_rdma_send(struct t4_sq *sq, union t4_wr *wqe,
			   const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;
	switch (wr->opcode) {
	case IB_WR_SEND:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND));
		wqe->send.stag_inv = 0;
		break;
	case IB_WR_SEND_WITH_INV:
		if (wr->send_flags & IB_SEND_SOLICITED)
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_SE_INV));
		else
			wqe->send.sendop_pkd = cpu_to_be32(
				FW_RI_SEND_WR_SENDOP_V(FW_RI_SEND_WITH_INV));
		wqe->send.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
		break;
	default:
		return -EINVAL;
	}
	wqe->send.r3 = 0;
	wqe->send.r4 = 0;

	plen = 0;
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->send.u.immd_src, wr,
					 T4_MAX_SEND_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->send.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->send) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->send.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->send.u.immd_src[0].r1 = 0;
		wqe->send.u.immd_src[0].r2 = 0;
		wqe->send.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->send) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->send.plen = cpu_to_be32(plen);
	return 0;
}

static int build_rdma_write(struct t4_sq *sq, union t4_wr *wqe,
			    const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;
	int ret;

	if (wr->num_sge > T4_MAX_SEND_SGE)
		return -EINVAL;

	/*
	 * The iWARP protocol supports 64 bit immediate data, but the RDMA
	 * API limits it to 32 bits.
	 */
	if (wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = wr->ex.imm_data;
	else
		wqe->write.iw_imm_data.ib_imm_data.imm_data32 = 0;
	wqe->write.stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wqe->write.to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->num_sge) {
		if (wr->send_flags & IB_SEND_INLINE) {
			ret = build_immd(sq, wqe->write.u.immd_src, wr,
					 T4_MAX_WRITE_INLINE, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_immd) +
			       plen;
		} else {
			ret = build_isgl((__be64 *)sq->queue,
					 (__be64 *)&sq->queue[sq->size],
					 wqe->write.u.isgl_src,
					 wr->sg_list, wr->num_sge, &plen);
			if (ret)
				return ret;
			size = sizeof(wqe->write) + sizeof(struct fw_ri_isgl) +
			       wr->num_sge * sizeof(struct fw_ri_sge);
		}
	} else {
		wqe->write.u.immd_src[0].op = FW_RI_DATA_IMMD;
		wqe->write.u.immd_src[0].r1 = 0;
		wqe->write.u.immd_src[0].r2 = 0;
		wqe->write.u.immd_src[0].immdlen = 0;
		size = sizeof(wqe->write) + sizeof(struct fw_ri_immd);
		plen = 0;
	}
	*len16 = DIV_ROUND_UP(size, 16);
	wqe->write.plen = cpu_to_be32(plen);
	return 0;
}

static void build_immd_cmpl(struct t4_sq *sq, struct fw_ri_immd_cmpl *immdp,
			    struct ib_send_wr *wr)
{
	memcpy((u8 *)immdp->data, (u8 *)(uintptr_t)wr->sg_list->addr, 16);
	memset(immdp->r1, 0, 6);
	immdp->op = FW_RI_DATA_IMMD;
	immdp->immdlen = 16;
}

static void build_rdma_write_cmpl(struct t4_sq *sq,
				  struct fw_ri_rdma_write_cmpl_wr *wcwr,
				  const struct ib_send_wr *wr, u8 *len16)
{
	u32 plen;
	int size;

	/*
	 * This code assumes the struct fields preceding the write isgl
	 * fit in one 64B WR slot.  This is because the WQE is built
	 * directly in the dma queue, and wrapping is only handled
	 * by the code building sgls.  IE the "fixed part" of the wr
	 * structs must all fit in 64B.  The WQE build code should probably be
	 * redesigned to avoid this restriction, but for now just add
	 * the BUILD_BUG_ON() to catch if this WQE struct gets too big.
	 */
	BUILD_BUG_ON(offsetof(struct fw_ri_rdma_write_cmpl_wr, u) > 64);

	wcwr->stag_sink = cpu_to_be32(rdma_wr(wr)->rkey);
	wcwr->to_sink = cpu_to_be64(rdma_wr(wr)->remote_addr);
	if (wr->next->opcode == IB_WR_SEND)
		wcwr->stag_inv = 0;
	else
		wcwr->stag_inv = cpu_to_be32(wr->next->ex.invalidate_rkey);
	wcwr->r2 = 0;
	wcwr->r3 = 0;

	/* SEND_INV SGL */
	if (wr->next->send_flags & IB_SEND_INLINE)
		build_immd_cmpl(sq, &wcwr->u_cmpl.immd_src, wr->next);
	else
		build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
			   &wcwr->u_cmpl.isgl_src, wr->next->sg_list, 1, NULL);

	/* WRITE SGL */
	build_isgl((__be64 *)sq->queue, (__be64 *)&sq->queue[sq->size],
		   wcwr->u.isgl_src, wr->sg_list, wr->num_sge, &plen);

	size = sizeof(*wcwr) + sizeof(struct fw_ri_isgl) +
	       wr->num_sge * sizeof(struct fw_ri_sge);
	wcwr->plen = cpu_to_be32(plen);
	*len16 = DIV_ROUND_UP(size, 16);
}

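/*
 * A read with no SGE (or a 0B length) is posted against stag 2 with a
 * zero payload length; stag 2 is presumably a reserved stag the
 * firmware accepts for such dummy reads.
 */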
static int build_rdma_read(union t4_wr *wqe, const struct ib_send_wr *wr,
			   u8 *len16)
{
	if (wr->num_sge > 1)
		return -EINVAL;
	if (wr->num_sge && wr->sg_list[0].length) {
		wqe->read.stag_src = cpu_to_be32(rdma_wr(wr)->rkey);
		wqe->read.to_src_hi = cpu_to_be32((u32)(rdma_wr(wr)->remote_addr
							>> 32));
		wqe->read.to_src_lo = cpu_to_be32((u32)rdma_wr(wr)->remote_addr);
		wqe->read.stag_sink = cpu_to_be32(wr->sg_list[0].lkey);
		wqe->read.plen = cpu_to_be32(wr->sg_list[0].length);
		wqe->read.to_sink_hi = cpu_to_be32((u32)(wr->sg_list[0].addr
							 >> 32));
		wqe->read.to_sink_lo = cpu_to_be32((u32)(wr->sg_list[0].addr));
	} else {
		wqe->read.stag_src = cpu_to_be32(2);
		wqe->read.to_src_hi = 0;
		wqe->read.to_src_lo = 0;
		wqe->read.stag_sink = cpu_to_be32(2);
		wqe->read.plen = 0;
		wqe->read.to_sink_hi = 0;
		wqe->read.to_sink_lo = 0;
	}
	wqe->read.r2 = 0;
	wqe->read.r5 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->read), 16);
	return 0;
}

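/*
 * Fuse a WRITE and the chained SEND/SEND_WITH_INV into one
 * FW_RI_RDMA_WRITE_CMPL_WR and post it, ringing the SQ doorbell.
 * Two sw_sq slots are still consumed so that completion processing
 * sees the original two-WR chain.
 */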
static void post_write_cmpl(struct c4iw_qp *qhp, const struct ib_send_wr *wr)
{
	bool send_signaled = (wr->next->send_flags & IB_SEND_SIGNALED) ||
			     qhp->sq_sig_all;
	bool write_signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
			      qhp->sq_sig_all;
	struct t4_swsqe *swsqe;
	union t4_wr *wqe;
	u16 write_wrid;
	u8 len16;
	u16 idx;

	/*
	 * The sw_sq entries still look like a WRITE and a SEND and consume
	 * 2 slots.  The FW WR, however, will be a single uber-WR.
	 */
	wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
			      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);
	build_rdma_write_cmpl(&qhp->wq.sq, &wqe->write_cmpl, wr, &len16);

	/* WRITE swsqe */
	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
	swsqe->opcode = FW_RI_RDMA_WRITE;
	swsqe->idx = qhp->wq.sq.pidx;
	swsqe->complete = 0;
	swsqe->signaled = write_signaled;
	swsqe->flushed = 0;
	swsqe->wr_id = wr->wr_id;
	if (c4iw_wr_log) {
		swsqe->sge_ts =
			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
		swsqe->host_time = ktime_get();
	}

	write_wrid = qhp->wq.sq.pidx;

	/* just bump the sw_sq */
	qhp->wq.sq.in_use++;
	if (++qhp->wq.sq.pidx == qhp->wq.sq.size)
		qhp->wq.sq.pidx = 0;

	/* SEND_WITH_INV swsqe */
	swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
	if (wr->next->opcode == IB_WR_SEND)
		swsqe->opcode = FW_RI_SEND;
	else
		swsqe->opcode = FW_RI_SEND_WITH_INV;
	swsqe->idx = qhp->wq.sq.pidx;
	swsqe->complete = 0;
	swsqe->signaled = send_signaled;
	swsqe->flushed = 0;
	swsqe->wr_id = wr->next->wr_id;
	if (c4iw_wr_log) {
		swsqe->sge_ts =
			cxgb4_read_sge_timestamp(qhp->rhp->rdev.lldi.ports[0]);
		swsqe->host_time = ktime_get();
	}

	wqe->write_cmpl.flags_send = send_signaled ? FW_RI_COMPLETION_FLAG : 0;
	wqe->write_cmpl.wrid_send = qhp->wq.sq.pidx;

	init_wr_hdr(wqe, write_wrid, FW_RI_RDMA_WRITE_CMPL_WR,
		    write_signaled ? FW_RI_COMPLETION_FLAG : 0, len16);
	t4_sq_produce(&qhp->wq, len16);
	idx = DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);

	t4_ring_sq_db(&qhp->wq, idx, wqe);
}

static int build_rdma_recv(struct c4iw_qp *qhp, union t4_recv_wr *wqe,
			   const struct ib_recv_wr *wr, u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)qhp->wq.rq.queue,
			 (__be64 *)&qhp->wq.rq.queue[qhp->wq.rq.size],
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(
		sizeof(wqe->recv) + wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

static int build_srq_recv(union t4_recv_wr *wqe, const struct ib_recv_wr *wr,
			  u8 *len16)
{
	int ret;

	ret = build_isgl((__be64 *)wqe, (__be64 *)(wqe + 1),
			 &wqe->recv.isgl, wr->sg_list, wr->num_sge, NULL);
	if (ret)
		return ret;
	*len16 = DIV_ROUND_UP(sizeof(wqe->recv) +
			      wr->num_sge * sizeof(struct fw_ri_sge), 16);
	return 0;
}

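/*
 * Build a FW_RI_FR_NSMR_TPTE_WR: a fastreg WR that carries the TPT
 * entry and a two-entry page list inline, avoiding a separate PBL
 * write for small fast-register operations.
 */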
static void build_tpte_memreg(struct fw_ri_fr_nsmr_tpte_wr *fr,
			      const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			      u8 *len16)
{
	__be64 *p = (__be64 *)fr->pbl;

	fr->r2 = cpu_to_be32(0);
	fr->stag = cpu_to_be32(mhp->ibmr.rkey);

	fr->tpte.valid_to_pdid = cpu_to_be32(FW_RI_TPTE_VALID_F |
		FW_RI_TPTE_STAGKEY_V((mhp->ibmr.rkey & FW_RI_TPTE_STAGKEY_M)) |
		FW_RI_TPTE_STAGSTATE_V(1) |
		FW_RI_TPTE_STAGTYPE_V(FW_RI_STAG_NSMR) |
		FW_RI_TPTE_PDID_V(mhp->attr.pdid));
	fr->tpte.locread_to_qpid = cpu_to_be32(
		FW_RI_TPTE_PERM_V(c4iw_ib_to_tpt_access(wr->access)) |
		FW_RI_TPTE_ADDRTYPE_V(FW_RI_VA_BASED_TO) |
		FW_RI_TPTE_PS_V(ilog2(wr->mr->page_size) - 12));
	fr->tpte.nosnoop_pbladdr = cpu_to_be32(FW_RI_TPTE_PBLADDR_V(
		PBL_OFF(&mhp->rhp->rdev, mhp->attr.pbl_addr) >> 3));
	fr->tpte.dca_mwbcnt_pstag = cpu_to_be32(0);
	fr->tpte.len_hi = cpu_to_be32(0);
	fr->tpte.len_lo = cpu_to_be32(mhp->ibmr.length);
	fr->tpte.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	fr->tpte.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova & 0xffffffff);

	p[0] = cpu_to_be64((u64)mhp->mpl[0]);
	p[1] = cpu_to_be64((u64)mhp->mpl[1]);

	*len16 = DIV_ROUND_UP(sizeof(*fr), 16);
}

static int build_memreg(struct t4_sq *sq, union t4_wr *wqe,
			const struct ib_reg_wr *wr, struct c4iw_mr *mhp,
			u8 *len16, bool dsgl_supported)
{
	struct fw_ri_immd *imdp;
	__be64 *p;
	int i;
	int pbllen = roundup(mhp->mpl_len * sizeof(u64), 32);
	int rem;

	if (mhp->mpl_len > t4_max_fr_depth(dsgl_supported && use_dsgl))
		return -EINVAL;

	wqe->fr.qpbinde_to_dcacpu = 0;
	wqe->fr.pgsz_shift = ilog2(wr->mr->page_size) - 12;
	wqe->fr.addr_type = FW_RI_VA_BASED_TO;
	wqe->fr.mem_perms = c4iw_ib_to_tpt_access(wr->access);
	wqe->fr.len_hi = 0;
	wqe->fr.len_lo = cpu_to_be32(mhp->ibmr.length);
	wqe->fr.stag = cpu_to_be32(wr->key);
	wqe->fr.va_hi = cpu_to_be32(mhp->ibmr.iova >> 32);
	wqe->fr.va_lo_fbo = cpu_to_be32(mhp->ibmr.iova &
					0xffffffff);

	if (dsgl_supported && use_dsgl && (pbllen > max_fr_immd)) {
		struct fw_ri_dsgl *sglp;

		for (i = 0; i < mhp->mpl_len; i++)
			mhp->mpl[i] = (__force u64)cpu_to_be64((u64)mhp->mpl[i]);

		sglp = (struct fw_ri_dsgl *)(&wqe->fr + 1);
		sglp->op = FW_RI_DATA_DSGL;
		sglp->r1 = 0;
		sglp->nsge = cpu_to_be16(1);
		sglp->addr0 = cpu_to_be64(mhp->mpl_addr);
		sglp->len0 = cpu_to_be32(pbllen);

		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*sglp), 16);
	} else {
		imdp = (struct fw_ri_immd *)(&wqe->fr + 1);
		imdp->op = FW_RI_DATA_IMMD;
		imdp->r1 = 0;
		imdp->r2 = 0;
		imdp->immdlen = cpu_to_be32(pbllen);
		p = (__be64 *)(imdp + 1);
		rem = pbllen;
		for (i = 0; i < mhp->mpl_len; i++) {
			*p = cpu_to_be64((u64)mhp->mpl[i]);
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		while (rem) {
			*p = 0;
			rem -= sizeof(*p);
			if (++p == (__be64 *)&sq->queue[sq->size])
				p = (__be64 *)sq->queue;
		}
		*len16 = DIV_ROUND_UP(sizeof(wqe->fr) + sizeof(*imdp)
				      + pbllen, 16);
	}
	return 0;
}

static int build_inv_stag(union t4_wr *wqe, const struct ib_send_wr *wr,
			  u8 *len16)
{
	wqe->inv.stag_inv = cpu_to_be32(wr->ex.invalidate_rkey);
	wqe->inv.r2 = 0;
	*len16 = DIV_ROUND_UP(sizeof(wqe->inv), 16);
	return 0;
}

void c4iw_qp_add_ref(struct ib_qp *qp)
{
	pr_debug("ib_qp %p\n", qp);
	refcount_inc(&to_c4iw_qp(qp)->qp_refcnt);
}

void c4iw_qp_rem_ref(struct ib_qp *qp)
{
	pr_debug("ib_qp %p\n", qp);
	if (refcount_dec_and_test(&to_c4iw_qp(qp)->qp_refcnt))
		complete(&to_c4iw_qp(qp)->qp_rel_comp);
}

static void add_to_fc_list(struct list_head *head, struct list_head *entry)
{
	if (list_empty(entry))
		list_add_tail(entry, head);
}

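/*
 * Ring the SQ doorbell directly while doorbells are in the NORMAL
 * state; otherwise queue the QP on the doorbell flow-control list and
 * accumulate the pidx increment for the recovery logic to apply later.
 * ring_kernel_rq_db() below does the same for the RQ.
 */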
static int ring_kernel_sq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_sq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.sq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}

static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
{
	unsigned long flags;

	xa_lock_irqsave(&qhp->rhp->qps, flags);
	spin_lock(&qhp->lock);
	if (qhp->rhp->db_state == NORMAL)
		t4_ring_rq_db(&qhp->wq, inc, NULL);
	else {
		add_to_fc_list(&qhp->rhp->db_fc_list, &qhp->db_fc_entry);
		qhp->wq.rq.wq_pidx_inc += inc;
	}
	spin_unlock(&qhp->lock);
	xa_unlock_irqrestore(&qhp->rhp->qps, flags);
	return 0;
}

static int ib_to_fw_opcode(int ib_opcode)
{
	int opcode;

	switch (ib_opcode) {
	case IB_WR_SEND_WITH_INV:
		opcode = FW_RI_SEND_WITH_INV;
		break;
	case IB_WR_SEND:
		opcode = FW_RI_SEND;
		break;
	case IB_WR_RDMA_WRITE:
		opcode = FW_RI_RDMA_WRITE;
		break;
	case IB_WR_RDMA_WRITE_WITH_IMM:
		opcode = FW_RI_WRITE_IMMEDIATE;
		break;
	case IB_WR_RDMA_READ:
	case IB_WR_RDMA_READ_WITH_INV:
		opcode = FW_RI_READ_REQ;
		break;
	case IB_WR_REG_MR:
		opcode = FW_RI_FAST_REGISTER;
		break;
	case IB_WR_LOCAL_INV:
		opcode = FW_RI_LOCAL_INV;
		break;
	default:
		opcode = -EINVAL;
	}
	return opcode;
}

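/*
 * A flushed QP completes posted WRs in software: synthesize a drain
 * CQE with status T4_ERR_SWFLUSH, add it to the software CQ, and kick
 * the completion handler if the CQ is armed.
 */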
static int complete_sq_drain_wr(struct c4iw_qp *qhp,
				const struct ib_send_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *schp;
	unsigned long flag;
	struct t4_cq *cq;
	int opcode;

	schp = to_c4iw_cq(qhp->ibqp.send_cq);
	cq = &schp->cq;

	opcode = ib_to_fw_opcode(wr->opcode);
	if (opcode < 0)
		return opcode;

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(opcode) |
				 CQE_TYPE_V(1) |
				 CQE_SWCQE_V(1) |
				 CQE_DRAIN_V(1) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&schp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&schp->lock, flag);

	if (t4_clear_cq_armed(&schp->cq)) {
		spin_lock_irqsave(&schp->comp_handler_lock, flag);
		(*schp->ibcq.comp_handler)(&schp->ibcq,
					   schp->ibcq.cq_context);
		spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
	}
	return 0;
}

static int complete_sq_drain_wrs(struct c4iw_qp *qhp,
				 const struct ib_send_wr *wr,
				 const struct ib_send_wr **bad_wr)
{
	int ret = 0;

	while (wr) {
		ret = complete_sq_drain_wr(qhp, wr);
		if (ret) {
			*bad_wr = wr;
			break;
		}
		wr = wr->next;
	}
	return ret;
}

static void complete_rq_drain_wr(struct c4iw_qp *qhp,
				 const struct ib_recv_wr *wr)
{
	struct t4_cqe cqe = {};
	struct c4iw_cq *rchp;
	unsigned long flag;
	struct t4_cq *cq;

	rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
	cq = &rchp->cq;

	cqe.u.drain_cookie = wr->wr_id;
	cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
				 CQE_OPCODE_V(FW_RI_SEND) |
				 CQE_TYPE_V(0) |
				 CQE_SWCQE_V(1) |
				 CQE_DRAIN_V(1) |
				 CQE_QPID_V(qhp->wq.sq.qid));

	spin_lock_irqsave(&rchp->lock, flag);
	cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
	cq->sw_queue[cq->sw_pidx] = cqe;
	t4_swcq_produce(cq);
	spin_unlock_irqrestore(&rchp->lock, flag);

	if (t4_clear_cq_armed(&rchp->cq)) {
		spin_lock_irqsave(&rchp->comp_handler_lock, flag);
		(*rchp->ibcq.comp_handler)(&rchp->ibcq,
					   rchp->ibcq.cq_context);
		spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
	}
}

static void complete_rq_drain_wrs(struct c4iw_qp *qhp,
				  const struct ib_recv_wr *wr)
{
	while (wr) {
		complete_rq_drain_wr(qhp, wr);
		wr = wr->next;
	}
}

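/*
 * Post a chain of send work requests to the SQ: each ib_send_wr is
 * translated into a T4 WQE, then the doorbell is rung once (or
 * deferred through ring_kernel_sq_db() when flow control is active).
 */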
int c4iw_post_send(struct ib_qp *ibqp, const struct ib_send_wr *wr,
		   const struct ib_send_wr **bad_wr)
{
	int err = 0;
	u8 len16 = 0;
	enum fw_wr_opcodes fw_opcode = 0;
	enum fw_ri_wr_flags fw_flags;
	struct c4iw_qp *qhp;
	struct c4iw_dev *rhp;
	union t4_wr *wqe = NULL;
	u32 num_wrs;
	struct t4_swsqe *swsqe;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	rhp = qhp->rhp;
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		err = complete_sq_drain_wrs(qhp, wr, bad_wr);
		return err;
	}
	num_wrs = t4_sq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}

	/*
	 * Fastpath for the NVMe-oF target WRITE + SEND_WITH_INV wr chain
	 * which is the response for small NVMe-oF READ requests.  If the
	 * chain is exactly a WRITE->SEND_WITH_INV or a WRITE->SEND and the
	 * sgl depths and lengths meet the requirements of the
	 * fw_ri_write_cmpl_wr work request, then build and post the
	 * write_cmpl WR.  If any of the tests below are not true, then we
	 * continue on with the traditional WRITE and SEND WRs.
	 */
	if (qhp->rhp->rdev.lldi.write_cmpl_support &&
	    CHELSIO_CHIP_VERSION(qhp->rhp->rdev.lldi.adapter_type) >=
	    CHELSIO_T5 &&
	    wr && wr->next && !wr->next->next &&
	    wr->opcode == IB_WR_RDMA_WRITE &&
	    wr->sg_list[0].length && wr->num_sge <= T4_WRITE_CMPL_MAX_SGL &&
	    (wr->next->opcode == IB_WR_SEND ||
	     wr->next->opcode == IB_WR_SEND_WITH_INV) &&
	    wr->next->sg_list[0].length == T4_WRITE_CMPL_MAX_CQE &&
	    wr->next->num_sge == 1 && num_wrs >= 2) {
		post_write_cmpl(qhp, wr);
		spin_unlock_irqrestore(&qhp->lock, flag);
		return 0;
	}

	while (wr) {
		if (num_wrs == 0) {
			err = -ENOMEM;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_wr *)((u8 *)qhp->wq.sq.queue +
				      qhp->wq.sq.wq_pidx * T4_EQ_ENTRY_SIZE);

		fw_flags = 0;
		if (wr->send_flags & IB_SEND_SOLICITED)
			fw_flags |= FW_RI_SOLICITED_EVENT_FLAG;
		if (wr->send_flags & IB_SEND_SIGNALED || qhp->sq_sig_all)
			fw_flags |= FW_RI_COMPLETION_FLAG;
		swsqe = &qhp->wq.sq.sw_sq[qhp->wq.sq.pidx];
		switch (wr->opcode) {
		case IB_WR_SEND_WITH_INV:
		case IB_WR_SEND:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_READ_FENCE_FLAG;
			fw_opcode = FW_RI_SEND_WR;
			if (wr->opcode == IB_WR_SEND)
				swsqe->opcode = FW_RI_SEND;
			else
				swsqe->opcode = FW_RI_SEND_WITH_INV;
			err = build_rdma_send(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_WRITE_WITH_IMM:
			if (unlikely(!rhp->rdev.lldi.write_w_imm_support)) {
				err = -EINVAL;
				break;
			}
			fw_flags |= FW_RI_RDMA_WRITE_WITH_IMMEDIATE;
			fallthrough;
		case IB_WR_RDMA_WRITE:
			fw_opcode = FW_RI_RDMA_WRITE_WR;
			swsqe->opcode = FW_RI_RDMA_WRITE;
			err = build_rdma_write(&qhp->wq.sq, wqe, wr, &len16);
			break;
		case IB_WR_RDMA_READ:
		case IB_WR_RDMA_READ_WITH_INV:
			fw_opcode = FW_RI_RDMA_READ_WR;
			swsqe->opcode = FW_RI_READ_REQ;
			if (wr->opcode == IB_WR_RDMA_READ_WITH_INV) {
				c4iw_invalidate_mr(rhp, wr->sg_list[0].lkey);
				fw_flags = FW_RI_RDMA_READ_INVALIDATE;
			} else {
				fw_flags = 0;
			}
			err = build_rdma_read(wqe, wr, &len16);
			if (err)
				break;
			swsqe->read_len = wr->sg_list[0].length;
			if (!qhp->wq.sq.oldest_read)
				qhp->wq.sq.oldest_read = swsqe;
			break;
		case IB_WR_REG_MR: {
			struct c4iw_mr *mhp = to_c4iw_mr(reg_wr(wr)->mr);

			swsqe->opcode = FW_RI_FAST_REGISTER;
			if (rhp->rdev.lldi.fr_nsmr_tpte_wr_support &&
			    !mhp->attr.state && mhp->mpl_len <= 2) {
				fw_opcode = FW_RI_FR_NSMR_TPTE_WR;
				build_tpte_memreg(&wqe->fr_tpte, reg_wr(wr),
						  mhp, &len16);
			} else {
				fw_opcode = FW_RI_FR_NSMR_WR;
				err = build_memreg(&qhp->wq.sq, wqe, reg_wr(wr),
						   mhp, &len16,
						   rhp->rdev.lldi.ulptx_memwrite_dsgl);
				if (err)
					break;
			}
			mhp->attr.state = 1;
			break;
		}
		case IB_WR_LOCAL_INV:
			if (wr->send_flags & IB_SEND_FENCE)
				fw_flags |= FW_RI_LOCAL_FENCE_FLAG;
			fw_opcode = FW_RI_INV_LSTAG_WR;
			swsqe->opcode = FW_RI_LOCAL_INV;
			err = build_inv_stag(wqe, wr, &len16);
			c4iw_invalidate_mr(rhp, wr->ex.invalidate_rkey);
			break;
		default:
			pr_warn("%s post of type=%d TBD!\n", __func__,
				wr->opcode);
			err = -EINVAL;
		}
		if (err) {
			*bad_wr = wr;
			break;
		}
		swsqe->idx = qhp->wq.sq.pidx;
		swsqe->complete = 0;
		swsqe->signaled = (wr->send_flags & IB_SEND_SIGNALED) ||
				  qhp->sq_sig_all;
		swsqe->flushed = 0;
		swsqe->wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			swsqe->sge_ts = cxgb4_read_sge_timestamp(
					rhp->rdev.lldi.ports[0]);
			swsqe->host_time = ktime_get();
		}

		init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);

		pr_debug("cookie 0x%llx pidx 0x%x opcode 0x%x read_len %u\n",
			 (unsigned long long)wr->wr_id, qhp->wq.sq.pidx,
			 swsqe->opcode, swsqe->read_len);
		wr = wr->next;
		num_wrs--;
		t4_sq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
	}
	if (!rhp->rdev.status_page->db_off) {
		t4_ring_sq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_sq_db(qhp, idx);
	}
	return err;
}

int c4iw_post_receive(struct ib_qp *ibqp, const struct ib_recv_wr *wr,
		      const struct ib_recv_wr **bad_wr)
{
	int err = 0;
	struct c4iw_qp *qhp;
	union t4_recv_wr *wqe = NULL;
	u32 num_wrs;
	u8 len16 = 0;
	unsigned long flag;
	u16 idx = 0;

	qhp = to_c4iw_qp(ibqp);
	spin_lock_irqsave(&qhp->lock, flag);

	/*
	 * If the qp has been flushed, then just insert a special
	 * drain cqe.
	 */
	if (qhp->wq.flushed) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		complete_rq_drain_wrs(qhp, wr);
		return err;
	}
	num_wrs = t4_rq_avail(&qhp->wq);
	if (num_wrs == 0) {
		spin_unlock_irqrestore(&qhp->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
	}
	while (wr) {
		if (wr->num_sge > T4_MAX_RECV_SGE) {
			err = -EINVAL;
			*bad_wr = wr;
			break;
		}
		wqe = (union t4_recv_wr *)((u8 *)qhp->wq.rq.queue +
					   qhp->wq.rq.wq_pidx *
					   T4_EQ_ENTRY_SIZE);
		if (num_wrs)
			err = build_rdma_recv(qhp, wqe, wr, &len16);
		else
			err = -ENOMEM;
		if (err) {
			*bad_wr = wr;
			break;
		}

		qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
		if (c4iw_wr_log) {
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
				cxgb4_read_sge_timestamp(
						qhp->rhp->rdev.lldi.ports[0]);
			qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_time =
				ktime_get();
		}

		wqe->recv.opcode = FW_RI_RECV_WR;
		wqe->recv.r1 = 0;
		wqe->recv.wrid = qhp->wq.rq.pidx;
		wqe->recv.r2[0] = 0;
		wqe->recv.r2[1] = 0;
		wqe->recv.r2[2] = 0;
		wqe->recv.len16 = len16;
		pr_debug("cookie 0x%llx pidx %u\n",
			 (unsigned long long)wr->wr_id, qhp->wq.rq.pidx);
		t4_rq_produce(&qhp->wq, len16);
		idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
		wr = wr->next;
		num_wrs--;
	}
	if (!qhp->rhp->rdev.status_page->db_off) {
		t4_ring_rq_db(&qhp->wq, idx, wqe);
		spin_unlock_irqrestore(&qhp->lock, flag);
	} else {
		spin_unlock_irqrestore(&qhp->lock, flag);
		ring_kernel_rq_db(qhp, idx);
	}
	return err;
}

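/*
 * Park an SRQ WR on the pending-WR queue so it can be pushed to
 * hardware later, in order, once earlier out-of-order activity has
 * drained.
 */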
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301341static void defer_srq_wr(struct t4_srq *srq, union t4_recv_wr *wqe,
1342 u64 wr_id, u8 len16)
1343{
1344 struct t4_srq_pending_wr *pwr = &srq->pending_wrs[srq->pending_pidx];
1345
1346 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u ooo_count %u wr_id 0x%llx pending_cidx %u pending_pidx %u pending_in_use %u\n",
1347 __func__, srq->cidx, srq->pidx, srq->wq_pidx,
1348 srq->in_use, srq->ooo_count,
1349 (unsigned long long)wr_id, srq->pending_cidx,
1350 srq->pending_pidx, srq->pending_in_use);
1351 pwr->wr_id = wr_id;
1352 pwr->len16 = len16;
1353 memcpy(&pwr->wqe, wqe, len16 * 16);
1354 t4_srq_produce_pending_wr(srq);
1355}
1356
Bart Van Assched34ac5c2018-07-18 09:25:32 -07001357int c4iw_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
1358 const struct ib_recv_wr **bad_wr)
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301359{
1360 union t4_recv_wr *wqe, lwqe;
1361 struct c4iw_srq *srq;
1362 unsigned long flag;
1363 u8 len16 = 0;
1364 u16 idx = 0;
1365 int err = 0;
1366 u32 num_wrs;
1367
1368 srq = to_c4iw_srq(ibsrq);
1369 spin_lock_irqsave(&srq->lock, flag);
1370 num_wrs = t4_srq_avail(&srq->wq);
1371 if (num_wrs == 0) {
1372 spin_unlock_irqrestore(&srq->lock, flag);
		*bad_wr = wr;
		return -ENOMEM;
1374 }
1375 while (wr) {
1376 if (wr->num_sge > T4_MAX_RECV_SGE) {
1377 err = -EINVAL;
1378 *bad_wr = wr;
1379 break;
1380 }
1381 wqe = &lwqe;
1382 if (num_wrs)
1383 err = build_srq_recv(wqe, wr, &len16);
1384 else
1385 err = -ENOMEM;
1386 if (err) {
1387 *bad_wr = wr;
1388 break;
1389 }
1390
1391 wqe->recv.opcode = FW_RI_RECV_WR;
1392 wqe->recv.r1 = 0;
1393 wqe->recv.wrid = srq->wq.pidx;
1394 wqe->recv.r2[0] = 0;
1395 wqe->recv.r2[1] = 0;
1396 wqe->recv.r2[2] = 0;
1397 wqe->recv.len16 = len16;
1398
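		/*
		 * Defer this WR if out-of-order completions are
		 * outstanding, earlier WRs are already deferred, or the
		 * target software slot is still in use; otherwise post
		 * it to the SRQ directly.
		 */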
1399 if (srq->wq.ooo_count ||
1400 srq->wq.pending_in_use ||
1401 srq->wq.sw_rq[srq->wq.pidx].valid) {
1402 defer_srq_wr(&srq->wq, wqe, wr->wr_id, len16);
1403 } else {
1404 srq->wq.sw_rq[srq->wq.pidx].wr_id = wr->wr_id;
1405 srq->wq.sw_rq[srq->wq.pidx].valid = 1;
1406 c4iw_copy_wr_to_srq(&srq->wq, wqe, len16);
1407 pr_debug("%s cidx %u pidx %u wq_pidx %u in_use %u wr_id 0x%llx\n",
1408 __func__, srq->wq.cidx,
1409 srq->wq.pidx, srq->wq.wq_pidx,
1410 srq->wq.in_use,
1411 (unsigned long long)wr->wr_id);
1412 t4_srq_produce(&srq->wq, len16);
1413 idx += DIV_ROUND_UP(len16 * 16, T4_EQ_ENTRY_SIZE);
1414 }
1415 wr = wr->next;
1416 num_wrs--;
1417 }
1418 if (idx)
1419 t4_ring_srq_db(&srq->wq, idx, len16, wqe);
1420 spin_unlock_irqrestore(&srq->lock, flag);
1421 return err;
1422}
1423
Steve Wisecfdda9d2010-04-21 15:30:06 -07001424static inline void build_term_codes(struct t4_cqe *err_cqe, u8 *layer_type,
1425 u8 *ecode)
1426{
1427 int status;
1428 int tagged;
1429 int opcode;
1430 int rqtype;
1431 int send_inv;
1432
1433 if (!err_cqe) {
1434 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1435 *ecode = 0;
1436 return;
1437 }
1438
1439 status = CQE_STATUS(err_cqe);
1440 opcode = CQE_OPCODE(err_cqe);
1441 rqtype = RQ_TYPE(err_cqe);
1442 send_inv = (opcode == FW_RI_SEND_WITH_INV) ||
1443 (opcode == FW_RI_SEND_WITH_SE_INV);
1444 tagged = (opcode == FW_RI_RDMA_WRITE) ||
1445 (rqtype && (opcode == FW_RI_READ_RESP));
1446
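	/*
	 * Map the CQE error status to the iWARP TERMINATE layer type and
	 * error code carried in the terminate message.
	 */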
1447 switch (status) {
1448 case T4_ERR_STAG:
1449 if (send_inv) {
1450 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1451 *ecode = RDMAP_CANT_INV_STAG;
1452 } else {
1453 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1454 *ecode = RDMAP_INV_STAG;
1455 }
1456 break;
1457 case T4_ERR_PDID:
1458 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1459 if ((opcode == FW_RI_SEND_WITH_INV) ||
1460 (opcode == FW_RI_SEND_WITH_SE_INV))
1461 *ecode = RDMAP_CANT_INV_STAG;
1462 else
1463 *ecode = RDMAP_STAG_NOT_ASSOC;
1464 break;
1465 case T4_ERR_QPID:
1466 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1467 *ecode = RDMAP_STAG_NOT_ASSOC;
1468 break;
1469 case T4_ERR_ACCESS:
1470 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1471 *ecode = RDMAP_ACC_VIOL;
1472 break;
1473 case T4_ERR_WRAP:
1474 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1475 *ecode = RDMAP_TO_WRAP;
1476 break;
1477 case T4_ERR_BOUND:
1478 if (tagged) {
1479 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1480 *ecode = DDPT_BASE_BOUNDS;
1481 } else {
1482 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_PROT;
1483 *ecode = RDMAP_BASE_BOUNDS;
1484 }
1485 break;
1486 case T4_ERR_INVALIDATE_SHARED_MR:
1487 case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND:
1488 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1489 *ecode = RDMAP_CANT_INV_STAG;
1490 break;
1491 case T4_ERR_ECC:
1492 case T4_ERR_ECC_PSTAG:
1493 case T4_ERR_INTERNAL_ERR:
1494 *layer_type = LAYER_RDMAP|RDMAP_LOCAL_CATA;
1495 *ecode = 0;
1496 break;
1497 case T4_ERR_OUT_OF_RQE:
1498 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1499 *ecode = DDPU_INV_MSN_NOBUF;
1500 break;
1501 case T4_ERR_PBL_ADDR_BOUND:
1502 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1503 *ecode = DDPT_BASE_BOUNDS;
1504 break;
1505 case T4_ERR_CRC:
1506 *layer_type = LAYER_MPA|DDP_LLP;
1507 *ecode = MPA_CRC_ERR;
1508 break;
1509 case T4_ERR_MARKER:
1510 *layer_type = LAYER_MPA|DDP_LLP;
1511 *ecode = MPA_MARKER_ERR;
1512 break;
1513 case T4_ERR_PDU_LEN_ERR:
1514 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1515 *ecode = DDPU_MSG_TOOBIG;
1516 break;
1517 case T4_ERR_DDP_VERSION:
1518 if (tagged) {
1519 *layer_type = LAYER_DDP|DDP_TAGGED_ERR;
1520 *ecode = DDPT_INV_VERS;
1521 } else {
1522 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1523 *ecode = DDPU_INV_VERS;
1524 }
1525 break;
1526 case T4_ERR_RDMA_VERSION:
1527 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1528 *ecode = RDMAP_INV_VERS;
1529 break;
1530 case T4_ERR_OPCODE:
1531 *layer_type = LAYER_RDMAP|RDMAP_REMOTE_OP;
1532 *ecode = RDMAP_INV_OPCODE;
1533 break;
1534 case T4_ERR_DDP_QUEUE_NUM:
1535 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1536 *ecode = DDPU_INV_QN;
1537 break;
1538 case T4_ERR_MSN:
1539 case T4_ERR_MSN_GAP:
1540 case T4_ERR_MSN_RANGE:
1541 case T4_ERR_IRD_OVERFLOW:
1542 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1543 *ecode = DDPU_INV_MSN_RANGE;
1544 break;
1545 case T4_ERR_TBIT:
1546 *layer_type = LAYER_DDP|DDP_LOCAL_CATA;
1547 *ecode = 0;
1548 break;
1549 case T4_ERR_MO:
1550 *layer_type = LAYER_DDP|DDP_UNTAGGED_ERR;
1551 *ecode = DDPU_INV_MO;
1552 break;
1553 default:
1554 *layer_type = LAYER_RDMAP|DDP_LOCAL_CATA;
1555 *ecode = 0;
1556 break;
1557 }
1558}
1559
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07001560static void post_terminate(struct c4iw_qp *qhp, struct t4_cqe *err_cqe,
1561 gfp_t gfp)
Steve Wisecfdda9d2010-04-21 15:30:06 -07001562{
1563 struct fw_ri_wr *wqe;
1564 struct sk_buff *skb;
1565 struct terminate_message *term;
1566
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301567 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid,
Joe Perchesa9a42882017-02-09 14:23:51 -08001568 qhp->ep->hwtid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001569
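	/*
	 * Use an skb pre-allocated on the endpoint so posting the
	 * TERMINATE does not depend on a fresh allocation succeeding.
	 */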
Hariprasad S4a740832016-06-10 01:05:15 +05301570 skb = skb_dequeue(&qhp->ep->com.ep_skb_list);
1571 if (WARN_ON(!skb))
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07001572 return;
Hariprasad S4a740832016-06-10 01:05:15 +05301573
Steve Wisecfdda9d2010-04-21 15:30:06 -07001574 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1575
YueHaibingecb238f2018-04-28 15:31:06 +08001576 wqe = __skb_put_zero(skb, sizeof(*wqe));
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301577 wqe->op_compl = cpu_to_be32(FW_WR_OP_V(FW_RI_INIT_WR));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001578 wqe->flowid_len16 = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301579 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1580 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001581
1582 wqe->u.terminate.type = FW_RI_TYPE_TERMINATE;
Leon Romanovsky34d56892019-05-20 09:54:31 +03001583 wqe->u.terminate.immdlen = cpu_to_be32(sizeof(*term));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001584 term = (struct terminate_message *)wqe->u.terminate.termmsg;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301585 if (qhp->attr.layer_etype == (LAYER_MPA|DDP_LLP)) {
1586 term->layer_etype = qhp->attr.layer_etype;
1587 term->ecode = qhp->attr.ecode;
1588 } else
1589 build_term_codes(err_cqe, &term->layer_etype, &term->ecode);
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07001590 c4iw_ofld_send(&qhp->rhp->rdev, skb);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001591}
1592
/*
 * Called with the qhp mutex held; takes the CQ and QP spinlocks itself.
 */
1596static void __flush_qp(struct c4iw_qp *qhp, struct c4iw_cq *rchp,
Steve Wise2f5b48c2010-09-10 11:15:36 -05001597 struct c4iw_cq *schp)
Steve Wisecfdda9d2010-04-21 15:30:06 -07001598{
1599 int count;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301600 int rq_flushed = 0, sq_flushed;
Steve Wise2f5b48c2010-09-10 11:15:36 -05001601 unsigned long flag;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001602
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301603 pr_debug("qhp %p rchp %p schp %p\n", qhp, rchp, schp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001604
Steve Wisebc52e9c2017-11-09 07:21:26 -08001605 /* locking hierarchy: cqs lock first, then qp lock. */
Steve Wise2f5b48c2010-09-10 11:15:36 -05001606 spin_lock_irqsave(&rchp->lock, flag);
Steve Wisebc52e9c2017-11-09 07:21:26 -08001607 if (schp != rchp)
1608 spin_lock(&schp->lock);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001609 spin_lock(&qhp->lock);
Steve Wise1cf24dc2013-08-06 21:04:35 +05301610
1611 if (qhp->wq.flushed) {
1612 spin_unlock(&qhp->lock);
Steve Wisebc52e9c2017-11-09 07:21:26 -08001613 if (schp != rchp)
1614 spin_unlock(&schp->lock);
Steve Wise1cf24dc2013-08-06 21:04:35 +05301615 spin_unlock_irqrestore(&rchp->lock, flag);
1616 return;
1617 }
1618 qhp->wq.flushed = 1;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301619 t4_set_wq_in_error(&qhp->wq, 0);
Steve Wise1cf24dc2013-08-06 21:04:35 +05301620
Bharat Potnuri2df19e12018-04-27 16:41:16 +05301621 c4iw_flush_hw_cq(rchp, qhp);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301622 if (!qhp->srq) {
1623 c4iw_count_rcqes(&rchp->cq, &qhp->wq, &count);
1624 rq_flushed = c4iw_flush_rq(&qhp->wq, &rchp->cq, count);
1625 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001626
Steve Wise1cf24dc2013-08-06 21:04:35 +05301627 if (schp != rchp)
Bharat Potnuri2df19e12018-04-27 16:41:16 +05301628 c4iw_flush_hw_cq(schp, qhp);
Steve Wise678ea9b2014-07-31 14:35:43 -05001629 sq_flushed = c4iw_flush_sq(qhp);
Steve Wisebc52e9c2017-11-09 07:21:26 -08001630
Steve Wisecfdda9d2010-04-21 15:30:06 -07001631 spin_unlock(&qhp->lock);
Steve Wisebc52e9c2017-11-09 07:21:26 -08001632 if (schp != rchp)
1633 spin_unlock(&schp->lock);
1634 spin_unlock_irqrestore(&rchp->lock, flag);
Steve Wise678ea9b2014-07-31 14:35:43 -05001635
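	/*
	 * If flushing produced completions and the CQ was still armed,
	 * run the completion handler now; t4_clear_cq_armed() guarantees
	 * a single upcall per arming.
	 */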
1636 if (schp == rchp) {
Steve Wise335ebf62017-11-30 09:41:56 -08001637 if ((rq_flushed || sq_flushed) &&
1638 t4_clear_cq_armed(&rchp->cq)) {
Steve Wise678ea9b2014-07-31 14:35:43 -05001639 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1640 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1641 rchp->ibcq.cq_context);
1642 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1643 }
1644 } else {
Steve Wise335ebf62017-11-30 09:41:56 -08001645 if (rq_flushed && t4_clear_cq_armed(&rchp->cq)) {
Steve Wise678ea9b2014-07-31 14:35:43 -05001646 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
1647 (*rchp->ibcq.comp_handler)(&rchp->ibcq,
1648 rchp->ibcq.cq_context);
1649 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
1650 }
Steve Wise335ebf62017-11-30 09:41:56 -08001651 if (sq_flushed && t4_clear_cq_armed(&schp->cq)) {
Steve Wise678ea9b2014-07-31 14:35:43 -05001652 spin_lock_irqsave(&schp->comp_handler_lock, flag);
1653 (*schp->ibcq.comp_handler)(&schp->ibcq,
1654 schp->ibcq.cq_context);
1655 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
1656 }
Kumar Sanghvi581bbe22011-10-24 21:20:21 +05301657 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001658}
1659
Steve Wise2f5b48c2010-09-10 11:15:36 -05001660static void flush_qp(struct c4iw_qp *qhp)
Steve Wisecfdda9d2010-04-21 15:30:06 -07001661{
1662 struct c4iw_cq *rchp, *schp;
Kumar Sanghvi581bbe22011-10-24 21:20:21 +05301663 unsigned long flag;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001664
Steve Wise1cf24dc2013-08-06 21:04:35 +05301665 rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
1666 schp = to_c4iw_cq(qhp->ibqp.send_cq);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001667
1668 if (qhp->ibqp.uobject) {
Steve Wise308aa2b2018-08-31 07:15:56 -07001669
1670 /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
1671 if (qhp->wq.flushed)
1672 return;
1673
1674 qhp->wq.flushed = 1;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301675 t4_set_wq_in_error(&qhp->wq, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001676 t4_set_cq_in_error(&rchp->cq);
Kumar Sanghvi581bbe22011-10-24 21:20:21 +05301677 spin_lock_irqsave(&rchp->comp_handler_lock, flag);
Kumar Sanghvi01e7da62011-10-13 13:51:30 +05301678 (*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
Kumar Sanghvi581bbe22011-10-24 21:20:21 +05301679 spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
Kumar Sanghvi01e7da62011-10-13 13:51:30 +05301680 if (schp != rchp) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001681 t4_set_cq_in_error(&schp->cq);
Kumar Sanghvi581bbe22011-10-24 21:20:21 +05301682 spin_lock_irqsave(&schp->comp_handler_lock, flag);
Kumar Sanghvi01e7da62011-10-13 13:51:30 +05301683 (*schp->ibcq.comp_handler)(&schp->ibcq,
1684 schp->ibcq.cq_context);
Kumar Sanghvi581bbe22011-10-24 21:20:21 +05301685 spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
Kumar Sanghvi01e7da62011-10-13 13:51:30 +05301686 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001687 return;
1688 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05001689 __flush_qp(qhp, rchp, schp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001690}
1691
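/*
 * Post an FW_RI_TYPE_FINI work request and wait for the firmware to
 * complete it, quiescing the RDMA state of the connection.
 */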
Steve Wise73d6fca2010-07-23 19:12:27 +00001692static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1693 struct c4iw_ep *ep)
Steve Wisecfdda9d2010-04-21 15:30:06 -07001694{
1695 struct fw_ri_wr *wqe;
1696 int ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001697 struct sk_buff *skb;
1698
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301699 pr_debug("qhp %p qid 0x%x tid %u\n", qhp, qhp->wq.sq.qid, ep->hwtid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001700
Hariprasad S4a740832016-06-10 01:05:15 +05301701 skb = skb_dequeue(&ep->com.ep_skb_list);
1702 if (WARN_ON(!skb))
Steve Wisecfdda9d2010-04-21 15:30:06 -07001703 return -ENOMEM;
Hariprasad S4a740832016-06-10 01:05:15 +05301704
Steve Wise73d6fca2010-07-23 19:12:27 +00001705 set_wr_txq(skb, CPL_PRIORITY_DATA, ep->txq_idx);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001706
YueHaibingecb238f2018-04-28 15:31:06 +08001707 wqe = __skb_put_zero(skb, sizeof(*wqe));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001708 wqe->op_compl = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301709 FW_WR_OP_V(FW_RI_INIT_WR) |
1710 FW_WR_COMPL_F);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001711 wqe->flowid_len16 = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301712 FW_WR_FLOWID_V(ep->hwtid) |
1713 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
Steve Wiseef885dc2017-09-26 13:12:16 -07001714 wqe->cookie = (uintptr_t)ep->com.wr_waitp;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001715
1716 wqe->u.fini.type = FW_RI_TYPE_FINI;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001717
Steve Wise2015f262017-09-26 13:13:17 -07001718 ret = c4iw_ref_send_wait(&rhp->rdev, skb, ep->com.wr_waitp,
1719 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
1720
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301721 pr_debug("ret %d\n", ret);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001722 return ret;
1723}
1724
1725static void build_rtr_msg(u8 p2p_type, struct fw_ri_init *init)
1726{
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301727 pr_debug("p2p_type = %d\n", p2p_type);
Leon Romanovsky34d56892019-05-20 09:54:31 +03001728 memset(&init->u, 0, sizeof(init->u));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001729 switch (p2p_type) {
1730 case FW_RI_INIT_P2PTYPE_RDMA_WRITE:
1731 init->u.write.opcode = FW_RI_RDMA_WRITE_WR;
1732 init->u.write.stag_sink = cpu_to_be32(1);
1733 init->u.write.to_sink = cpu_to_be64(1);
1734 init->u.write.u.immd_src[0].op = FW_RI_DATA_IMMD;
Leon Romanovsky34d56892019-05-20 09:54:31 +03001735 init->u.write.len16 = DIV_ROUND_UP(
1736 sizeof(init->u.write) + sizeof(struct fw_ri_immd), 16);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001737 break;
1738 case FW_RI_INIT_P2PTYPE_READ_REQ:
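		/*
		 * Note: the opcode is written through u.write, but the
		 * read and write members of the union both start with
		 * the opcode field, so this also sets the read WR's
		 * opcode.
		 */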
1739 init->u.write.opcode = FW_RI_RDMA_READ_WR;
1740 init->u.read.stag_src = cpu_to_be32(1);
1741 init->u.read.to_src_lo = cpu_to_be32(1);
1742 init->u.read.stag_sink = cpu_to_be32(1);
1743 init->u.read.to_sink_lo = cpu_to_be32(1);
Leon Romanovsky34d56892019-05-20 09:54:31 +03001744 init->u.read.len16 = DIV_ROUND_UP(sizeof(init->u.read), 16);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001745 break;
1746 }
1747}
1748
1749static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
1750{
1751 struct fw_ri_wr *wqe;
1752 int ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001753 struct sk_buff *skb;
1754
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301755 pr_debug("qhp %p qid 0x%x tid %u ird %u ord %u\n", qhp,
Joe Perchesa9a42882017-02-09 14:23:51 -08001756 qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001757
Leon Romanovsky34d56892019-05-20 09:54:31 +03001758 skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301759 if (!skb) {
1760 ret = -ENOMEM;
1761 goto out;
1762 }
1763 ret = alloc_ird(rhp, qhp->attr.max_ird);
1764 if (ret) {
1765 qhp->attr.max_ird = 0;
1766 kfree_skb(skb);
1767 goto out;
1768 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001769 set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
1770
YueHaibingecb238f2018-04-28 15:31:06 +08001771 wqe = __skb_put_zero(skb, sizeof(*wqe));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001772 wqe->op_compl = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301773 FW_WR_OP_V(FW_RI_INIT_WR) |
1774 FW_WR_COMPL_F);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001775 wqe->flowid_len16 = cpu_to_be32(
Hariprasad Shenaie2ac9622014-11-07 09:35:25 +05301776 FW_WR_FLOWID_V(qhp->ep->hwtid) |
1777 FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
Steve Wisecfdda9d2010-04-21 15:30:06 -07001778
Steve Wiseef885dc2017-09-26 13:12:16 -07001779 wqe->cookie = (uintptr_t)qhp->ep->com.wr_waitp;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001780
1781 wqe->u.init.type = FW_RI_TYPE_INIT;
1782 wqe->u.init.mpareqbit_p2ptype =
Hariprasad Shenaicf7fe642015-01-16 09:24:48 +05301783 FW_RI_WR_MPAREQBIT_V(qhp->attr.mpa_attr.initiator) |
1784 FW_RI_WR_P2PTYPE_V(qhp->attr.mpa_attr.p2p_type);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001785 wqe->u.init.mpa_attrs = FW_RI_MPA_IETF_ENABLE;
1786 if (qhp->attr.mpa_attr.recv_marker_enabled)
1787 wqe->u.init.mpa_attrs |= FW_RI_MPA_RX_MARKER_ENABLE;
1788 if (qhp->attr.mpa_attr.xmit_marker_enabled)
1789 wqe->u.init.mpa_attrs |= FW_RI_MPA_TX_MARKER_ENABLE;
1790 if (qhp->attr.mpa_attr.crc_enabled)
1791 wqe->u.init.mpa_attrs |= FW_RI_MPA_CRC_ENABLE;
1792
1793 wqe->u.init.qp_caps = FW_RI_QP_RDMA_READ_ENABLE |
1794 FW_RI_QP_RDMA_WRITE_ENABLE |
1795 FW_RI_QP_BIND_ENABLE;
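	/* Fast-register and STAG0 are only usable by kernel QPs. */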
1796 if (!qhp->ibqp.uobject)
1797 wqe->u.init.qp_caps |= FW_RI_QP_FAST_REGISTER_ENABLE |
1798 FW_RI_QP_STAG0_ENABLE;
1799 wqe->u.init.nrqe = cpu_to_be16(t4_rqes_posted(&qhp->wq));
1800 wqe->u.init.pdid = cpu_to_be32(qhp->attr.pd);
1801 wqe->u.init.qpid = cpu_to_be32(qhp->wq.sq.qid);
1802 wqe->u.init.sq_eqid = cpu_to_be32(qhp->wq.sq.qid);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301803 if (qhp->srq) {
1804 wqe->u.init.rq_eqid = cpu_to_be32(FW_RI_INIT_RQEQID_SRQ |
1805 qhp->srq->idx);
1806 } else {
1807 wqe->u.init.rq_eqid = cpu_to_be32(qhp->wq.rq.qid);
1808 wqe->u.init.hwrqsize = cpu_to_be32(qhp->wq.rq.rqt_size);
1809 wqe->u.init.hwrqaddr = cpu_to_be32(qhp->wq.rq.rqt_hwaddr -
1810 rhp->rdev.lldi.vr->rq.start);
1811 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001812 wqe->u.init.scqid = cpu_to_be32(qhp->attr.scq);
1813 wqe->u.init.rcqid = cpu_to_be32(qhp->attr.rcq);
1814 wqe->u.init.ord_max = cpu_to_be32(qhp->attr.max_ord);
1815 wqe->u.init.ird_max = cpu_to_be32(qhp->attr.max_ird);
1816 wqe->u.init.iss = cpu_to_be32(qhp->ep->snd_seq);
1817 wqe->u.init.irs = cpu_to_be32(qhp->ep->rcv_seq);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001818 if (qhp->attr.mpa_attr.initiator)
1819 build_rtr_msg(qhp->attr.mpa_attr.p2p_type, &wqe->u.init);
1820
Steve Wise2015f262017-09-26 13:13:17 -07001821 ret = c4iw_ref_send_wait(&rhp->rdev, skb, qhp->ep->com.wr_waitp,
1822 qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301823 if (!ret)
1824 goto out;
Steve Wise2015f262017-09-26 13:13:17 -07001825
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301826 free_ird(rhp, qhp->attr.max_ird);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001827out:
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301828 pr_debug("ret %d\n", ret);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001829 return ret;
1830}
1831
1832int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
1833 enum c4iw_qp_attr_mask mask,
1834 struct c4iw_qp_attributes *attrs,
1835 int internal)
1836{
1837 int ret = 0;
1838 struct c4iw_qp_attributes newattr = qhp->attr;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001839 int disconnect = 0;
1840 int terminate = 0;
1841 int abort = 0;
1842 int free = 0;
1843 struct c4iw_ep *ep = NULL;
1844
Bharat Potnuri548ddb12017-09-27 13:05:49 +05301845 pr_debug("qhp %p sqid 0x%x rqid 0x%x ep %p state %d -> %d\n",
Joe Perchesa9a42882017-02-09 14:23:51 -08001846 qhp, qhp->wq.sq.qid, qhp->wq.rq.qid, qhp->ep, qhp->attr.state,
1847 (mask & C4IW_QP_ATTR_NEXT_STATE) ? attrs->next_state : -1);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001848
Steve Wise2f5b48c2010-09-10 11:15:36 -05001849 mutex_lock(&qhp->mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001850
1851 /* Process attr changes if in IDLE */
1852 if (mask & C4IW_QP_ATTR_VALID_MODIFY) {
1853 if (qhp->attr.state != C4IW_QP_STATE_IDLE) {
1854 ret = -EIO;
1855 goto out;
1856 }
1857 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_READ)
1858 newattr.enable_rdma_read = attrs->enable_rdma_read;
1859 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_WRITE)
1860 newattr.enable_rdma_write = attrs->enable_rdma_write;
1861 if (mask & C4IW_QP_ATTR_ENABLE_RDMA_BIND)
1862 newattr.enable_bind = attrs->enable_bind;
1863 if (mask & C4IW_QP_ATTR_MAX_ORD) {
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07001864 if (attrs->max_ord > c4iw_max_read_depth) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001865 ret = -EINVAL;
1866 goto out;
1867 }
1868 newattr.max_ord = attrs->max_ord;
1869 }
1870 if (mask & C4IW_QP_ATTR_MAX_IRD) {
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05301871 if (attrs->max_ird > cur_max_read_depth(rhp)) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001872 ret = -EINVAL;
1873 goto out;
1874 }
1875 newattr.max_ird = attrs->max_ird;
1876 }
1877 qhp->attr = newattr;
1878 }
1879
Vipul Pandya2c974782012-05-18 15:29:28 +05301880 if (mask & C4IW_QP_ATTR_SQ_DB) {
Steve Wise05eb2382014-03-14 21:52:08 +05301881 ret = ring_kernel_sq_db(qhp, attrs->sq_db_inc);
Vipul Pandya2c974782012-05-18 15:29:28 +05301882 goto out;
1883 }
1884 if (mask & C4IW_QP_ATTR_RQ_DB) {
Steve Wise05eb2382014-03-14 21:52:08 +05301885 ret = ring_kernel_rq_db(qhp, attrs->rq_db_inc);
Vipul Pandya2c974782012-05-18 15:29:28 +05301886 goto out;
1887 }
1888
Steve Wisecfdda9d2010-04-21 15:30:06 -07001889 if (!(mask & C4IW_QP_ATTR_NEXT_STATE))
1890 goto out;
1891 if (qhp->attr.state == attrs->next_state)
1892 goto out;
1893
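	/*
	 * iWARP QP state machine: IDLE -> RTS or ERROR; RTS -> CLOSING,
	 * TERMINATE or ERROR; CLOSING -> IDLE or ERROR; ERROR -> IDLE
	 * once both work queues have drained.
	 */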
1894 switch (qhp->attr.state) {
1895 case C4IW_QP_STATE_IDLE:
1896 switch (attrs->next_state) {
1897 case C4IW_QP_STATE_RTS:
1898 if (!(mask & C4IW_QP_ATTR_LLP_STREAM_HANDLE)) {
1899 ret = -EINVAL;
1900 goto out;
1901 }
1902 if (!(mask & C4IW_QP_ATTR_MPA_ATTR)) {
1903 ret = -EINVAL;
1904 goto out;
1905 }
1906 qhp->attr.mpa_attr = attrs->mpa_attr;
1907 qhp->attr.llp_stream_handle = attrs->llp_stream_handle;
1908 qhp->ep = qhp->attr.llp_stream_handle;
Steve Wise2f5b48c2010-09-10 11:15:36 -05001909 set_state(qhp, C4IW_QP_STATE_RTS);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001910
1911 /*
1912 * Ref the endpoint here and deref when we
1913 * disassociate the endpoint from the QP. This
1914 * happens in CLOSING->IDLE transition or *->ERROR
1915 * transition.
1916 */
1917 c4iw_get_ep(&qhp->ep->com);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001918 ret = rdma_init(rhp, qhp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001919 if (ret)
1920 goto err;
1921 break;
1922 case C4IW_QP_STATE_ERROR:
Steve Wise2f5b48c2010-09-10 11:15:36 -05001923 set_state(qhp, C4IW_QP_STATE_ERROR);
1924 flush_qp(qhp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001925 break;
1926 default:
1927 ret = -EINVAL;
1928 goto out;
1929 }
1930 break;
1931 case C4IW_QP_STATE_RTS:
1932 switch (attrs->next_state) {
1933 case C4IW_QP_STATE_CLOSING:
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301934 t4_set_wq_in_error(&qhp->wq, 0);
Steve Wise2f5b48c2010-09-10 11:15:36 -05001935 set_state(qhp, C4IW_QP_STATE_CLOSING);
Steve Wise73d6fca2010-07-23 19:12:27 +00001936 ep = qhp->ep;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001937 if (!internal) {
1938 abort = 0;
1939 disconnect = 1;
Steve Wise2f5b48c2010-09-10 11:15:36 -05001940 c4iw_get_ep(&qhp->ep->com);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001941 }
Steve Wise73d6fca2010-07-23 19:12:27 +00001942 ret = rdma_fini(rhp, qhp, ep);
Steve Wise8da7e7a2011-06-14 20:59:27 +00001943 if (ret)
Steve Wisecfdda9d2010-04-21 15:30:06 -07001944 goto err;
Steve Wisecfdda9d2010-04-21 15:30:06 -07001945 break;
1946 case C4IW_QP_STATE_TERMINATE:
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301947 t4_set_wq_in_error(&qhp->wq, 0);
Steve Wise2f5b48c2010-09-10 11:15:36 -05001948 set_state(qhp, C4IW_QP_STATE_TERMINATE);
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05301949 qhp->attr.layer_etype = attrs->layer_etype;
1950 qhp->attr.ecode = attrs->ecode;
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07001951 ep = qhp->ep;
Steve Wisecc18b932014-04-24 14:31:53 -05001952 if (!internal) {
Krishnamraju Eraparajud219fac2020-02-04 14:42:30 +05301953 c4iw_get_ep(&ep->com);
Steve Wise0e42c1f2010-09-10 11:15:09 -05001954 terminate = 1;
Krishnamraju Eraparajud219fac2020-02-04 14:42:30 +05301955 disconnect = 1;
Steve Wisecc18b932014-04-24 14:31:53 -05001956 } else {
1957 terminate = qhp->attr.send_term;
Steve Wise09992572013-08-06 21:04:40 +05301958 ret = rdma_fini(rhp, qhp, ep);
1959 if (ret)
1960 goto err;
1961 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07001962 break;
1963 case C4IW_QP_STATE_ERROR:
Raju Rangoju6a0b6172018-07-25 21:22:14 +05301964 t4_set_wq_in_error(&qhp->wq, 0);
Steve Wiseb4e29012014-04-09 09:38:26 -05001965 set_state(qhp, C4IW_QP_STATE_ERROR);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001966 if (!internal) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001967 disconnect = 1;
1968 ep = qhp->ep;
Steve Wise2f5b48c2010-09-10 11:15:36 -05001969 c4iw_get_ep(&qhp->ep->com);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001970 }
1971 goto err;
1973 default:
1974 ret = -EINVAL;
1975 goto out;
1976 }
1977 break;
1978 case C4IW_QP_STATE_CLOSING:
Steve Wise4fe7c292016-12-22 07:04:59 -08001979
1980 /*
1981 * Allow kernel users to move to ERROR for qp draining.
1982 */
1983 if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
1984 C4IW_QP_STATE_ERROR)) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07001985 ret = -EINVAL;
1986 goto out;
1987 }
1988 switch (attrs->next_state) {
1989 case C4IW_QP_STATE_IDLE:
Steve Wise2f5b48c2010-09-10 11:15:36 -05001990 flush_qp(qhp);
1991 set_state(qhp, C4IW_QP_STATE_IDLE);
Steve Wisecfdda9d2010-04-21 15:30:06 -07001992 qhp->attr.llp_stream_handle = NULL;
1993 c4iw_put_ep(&qhp->ep->com);
1994 qhp->ep = NULL;
1995 wake_up(&qhp->wait);
1996 break;
1997 case C4IW_QP_STATE_ERROR:
1998 goto err;
1999 default:
2000 ret = -EINVAL;
2001 goto err;
2002 }
2003 break;
2004 case C4IW_QP_STATE_ERROR:
2005 if (attrs->next_state != C4IW_QP_STATE_IDLE) {
2006 ret = -EINVAL;
2007 goto out;
2008 }
2009 if (!t4_sq_empty(&qhp->wq) || !t4_rq_empty(&qhp->wq)) {
2010 ret = -EINVAL;
2011 goto out;
2012 }
Steve Wise2f5b48c2010-09-10 11:15:36 -05002013 set_state(qhp, C4IW_QP_STATE_IDLE);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002014 break;
2015 case C4IW_QP_STATE_TERMINATE:
2016 if (!internal) {
2017 ret = -EINVAL;
2018 goto out;
2019 }
2020 goto err;
2022 default:
Joe Perches700456b2017-02-09 14:23:50 -08002023 pr_err("%s in a bad state %d\n", __func__, qhp->attr.state);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002024 ret = -EINVAL;
2025 goto err;
2027 }
2028 goto out;
2029err:
Bharat Potnuri548ddb12017-09-27 13:05:49 +05302030 pr_debug("disassociating ep %p qpid 0x%x\n", qhp->ep,
Joe Perchesa9a42882017-02-09 14:23:51 -08002031 qhp->wq.sq.qid);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002032
2033 /* disassociate the LLP connection */
2034 qhp->attr.llp_stream_handle = NULL;
Steve Wiseaf93fb52010-09-10 11:14:48 -05002035 if (!ep)
2036 ep = qhp->ep;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002037 qhp->ep = NULL;
Steve Wise2f5b48c2010-09-10 11:15:36 -05002038 set_state(qhp, C4IW_QP_STATE_ERROR);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002039 free = 1;
Vipul Pandya91e9c0712013-01-07 13:11:51 +00002040 abort = 1;
Steve Wise2f5b48c2010-09-10 11:15:36 -05002041 flush_qp(qhp);
Steve Wise5b3418082014-11-21 09:36:36 -06002042 wake_up(&qhp->wait);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002043out:
Steve Wise2f5b48c2010-09-10 11:15:36 -05002044 mutex_unlock(&qhp->mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002045
2046 if (terminate)
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002047 post_terminate(qhp, NULL, internal ? GFP_ATOMIC : GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002048
2049 /*
2050 * If disconnect is 1, then we need to initiate a disconnect
2051 * on the EP. This can be a normal close (RTS->CLOSING) or
2052 * an abnormal close (RTS/CLOSING->ERROR).
2053 */
2054 if (disconnect) {
Roland Dreierbe4c9ba2010-05-05 14:45:40 -07002055 c4iw_ep_disconnect(ep, abort, internal ? GFP_ATOMIC :
2056 GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002057 c4iw_put_ep(&ep->com);
2058 }
2059
2060 /*
2061 * If free is 1, then we've disassociated the EP from the QP
2062 * and we need to dereference the EP.
2063 */
2064 if (free)
2065 c4iw_put_ep(&ep->com);
Bharat Potnuri548ddb12017-09-27 13:05:49 +05302066 pr_debug("exit state %d\n", qhp->attr.state);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002067 return ret;
2068}
2069
Shamir Rabinovitchc4367a22019-03-31 19:10:05 +03002070int c4iw_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
Steve Wisecfdda9d2010-04-21 15:30:06 -07002071{
2072 struct c4iw_dev *rhp;
2073 struct c4iw_qp *qhp;
Nirranjan Kirubaharanf70baa72019-05-23 00:05:39 -07002074 struct c4iw_ucontext *ucontext;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002075 struct c4iw_qp_attributes attrs;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002076
2077 qhp = to_c4iw_qp(ib_qp);
2078 rhp = qhp->rhp;
Nirranjan Kirubaharanf70baa72019-05-23 00:05:39 -07002079 ucontext = qhp->ucontext;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002080
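	/*
	 * Drive the QP to ERROR and wait until it has been disassociated
	 * from its LLP endpoint before tearing down the queues.
	 */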
2081 attrs.next_state = C4IW_QP_STATE_ERROR;
Kumar Sanghvid2fe99e2011-09-25 20:17:44 +05302082 if (qhp->attr.state == C4IW_QP_STATE_TERMINATE)
2083 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
2084 else
2085 c4iw_modify_qp(rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002086 wait_event(qhp->wait, !qhp->ep);
2087
Matthew Wilcox2f431292019-02-20 16:20:51 -08002088 xa_lock_irq(&rhp->qps);
2089 __xa_erase(&rhp->qps, qhp->wq.sq.qid);
Steve Wise05eb2382014-03-14 21:52:08 +05302090 if (!list_empty(&qhp->db_fc_entry))
2091 list_del_init(&qhp->db_fc_entry);
Matthew Wilcox2f431292019-02-20 16:20:51 -08002092 xa_unlock_irq(&rhp->qps);
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302093 free_ird(rhp, qhp->attr.max_ird);
Steve Wise05eb2382014-03-14 21:52:08 +05302094
Steve Wisead61a4c2016-07-29 11:00:54 -07002095 c4iw_qp_rem_ref(ib_qp);
2096
Nirranjan Kirubaharanf70baa72019-05-23 00:05:39 -07002097 wait_for_completion(&qhp->qp_rel_comp);
2098
Bharat Potnuri548ddb12017-09-27 13:05:49 +05302099 pr_debug("ib_qp %p qpid 0x%0x\n", ib_qp, qhp->wq.sq.qid);
Nirranjan Kirubaharanf70baa72019-05-23 00:05:39 -07002100 pr_debug("qhp %p ucontext %p\n", qhp, ucontext);
2101
2102 destroy_qp(&rhp->rdev, &qhp->wq,
2103 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !qhp->srq);
2104
2105 c4iw_put_wr_wait(qhp->wr_waitp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002106 return 0;
2107}
2108
Leon Romanovsky514aee62021-07-23 14:39:50 +03002109int c4iw_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs,
2110 struct ib_udata *udata)
Steve Wisecfdda9d2010-04-21 15:30:06 -07002111{
Leon Romanovsky514aee62021-07-23 14:39:50 +03002112 struct ib_pd *pd = qp->pd;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002113 struct c4iw_dev *rhp;
Leon Romanovsky514aee62021-07-23 14:39:50 +03002114 struct c4iw_qp *qhp = to_c4iw_qp(qp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002115 struct c4iw_pd *php;
2116 struct c4iw_cq *schp;
2117 struct c4iw_cq *rchp;
2118 struct c4iw_create_qp_resp uresp;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302119 unsigned int sqsize, rqsize = 0;
Shamir Rabinovitch89944452019-02-07 18:44:49 +02002120 struct c4iw_ucontext *ucontext = rdma_udata_to_drv_context(
2121 udata, struct c4iw_ucontext, ibucontext);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002122 int ret;
Hariprasad Sa6054df2016-02-05 11:43:28 +05302123 struct c4iw_mm_entry *sq_key_mm, *rq_key_mm = NULL, *sq_db_key_mm;
2124 struct c4iw_mm_entry *rq_db_key_mm = NULL, *ma_sync_key_mm = NULL;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002125
Jason Gunthorpe1f11a762020-10-03 20:20:08 -03002126 if (attrs->qp_type != IB_QPT_RC || attrs->create_flags)
Leon Romanovsky514aee62021-07-23 14:39:50 +03002127 return -EOPNOTSUPP;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002128
2129 php = to_c4iw_pd(pd);
2130 rhp = php->rhp;
2131 schp = get_chp(rhp, ((struct c4iw_cq *)attrs->send_cq)->cq.cqid);
2132 rchp = get_chp(rhp, ((struct c4iw_cq *)attrs->recv_cq)->cq.cqid);
2133 if (!schp || !rchp)
Leon Romanovsky514aee62021-07-23 14:39:50 +03002134 return -EINVAL;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002135
2136 if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
Leon Romanovsky514aee62021-07-23 14:39:50 +03002137 return -EINVAL;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002138
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302139 if (!attrs->srq) {
2140 if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
Leon Romanovsky514aee62021-07-23 14:39:50 +03002141 return -E2BIG;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302142 rqsize = attrs->cap.max_recv_wr + 1;
2143 if (rqsize < 8)
2144 rqsize = 8;
2145 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002146
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +05302147 if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
Leon Romanovsky514aee62021-07-23 14:39:50 +03002148 return -E2BIG;
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +05302149 sqsize = attrs->cap.max_send_wr + 1;
2150 if (sqsize < 8)
2151 sqsize = 8;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002152
Steve Wise2015f262017-09-26 13:13:17 -07002153 qhp->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
Leon Romanovsky514aee62021-07-23 14:39:50 +03002154 if (!qhp->wr_waitp)
2155 return -ENOMEM;
Steve Wise7088a9b2017-09-26 13:11:36 -07002156
Steve Wisecfdda9d2010-04-21 15:30:06 -07002157 qhp->wq.sq.size = sqsize;
Hariprasad Shenai66eb19a2014-07-21 20:55:15 +05302158 qhp->wq.sq.memsize =
2159 (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2160 sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
Steve Wise1cf24dc2013-08-06 21:04:35 +05302161 qhp->wq.sq.flush_cidx = -1;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302162 if (!attrs->srq) {
2163 qhp->wq.rq.size = rqsize;
2164 qhp->wq.rq.memsize =
2165 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2166 sizeof(*qhp->wq.rq.queue);
2167 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002168
2169 if (ucontext) {
2170 qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302171 if (!attrs->srq)
2172 qhp->wq.rq.memsize =
2173 roundup(qhp->wq.rq.memsize, PAGE_SIZE);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002174 }
2175
Steve Wisecfdda9d2010-04-21 15:30:06 -07002176 ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
Steve Wise7088a9b2017-09-26 13:11:36 -07002177 ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302178 qhp->wr_waitp, !attrs->srq);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002179 if (ret)
Steve Wise7088a9b2017-09-26 13:11:36 -07002180 goto err_free_wr_wait;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002181
2182 attrs->cap.max_recv_wr = rqsize - 1;
2183 attrs->cap.max_send_wr = sqsize - 1;
2184 attrs->cap.max_inline_data = T4_MAX_SEND_INLINE;
2185
2186 qhp->rhp = rhp;
2187 qhp->attr.pd = php->pdid;
2188 qhp->attr.scq = ((struct c4iw_cq *) attrs->send_cq)->cq.cqid;
2189 qhp->attr.rcq = ((struct c4iw_cq *) attrs->recv_cq)->cq.cqid;
2190 qhp->attr.sq_num_entries = attrs->cap.max_send_wr;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002191 qhp->attr.sq_max_sges = attrs->cap.max_send_sge;
2192 qhp->attr.sq_max_sges_rdma_write = attrs->cap.max_send_sge;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302193 if (!attrs->srq) {
2194 qhp->attr.rq_num_entries = attrs->cap.max_recv_wr;
2195 qhp->attr.rq_max_sges = attrs->cap.max_recv_sge;
2196 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002197 qhp->attr.state = C4IW_QP_STATE_IDLE;
2198 qhp->attr.next_state = C4IW_QP_STATE_IDLE;
2199 qhp->attr.enable_rdma_read = 1;
2200 qhp->attr.enable_rdma_write = 1;
2201 qhp->attr.enable_bind = 1;
Hariprasad Shenai4c2c5762014-07-14 21:34:52 +05302202 qhp->attr.max_ord = 0;
2203 qhp->attr.max_ird = 0;
Steve Wiseba32de92014-03-19 17:44:43 +05302204 qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002205 spin_lock_init(&qhp->lock);
Steve Wise2f5b48c2010-09-10 11:15:36 -05002206 mutex_init(&qhp->mutex);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002207 init_waitqueue_head(&qhp->wait);
Nirranjan Kirubaharanf70baa72019-05-23 00:05:39 -07002208 init_completion(&qhp->qp_rel_comp);
2209 refcount_set(&qhp->qp_refcnt, 1);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002210
Matthew Wilcox2f431292019-02-20 16:20:51 -08002211 ret = xa_insert_irq(&rhp->qps, qhp->wq.sq.qid, qhp, GFP_KERNEL);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002212 if (ret)
Steve Wise7088a9b2017-09-26 13:11:36 -07002213 goto err_destroy_qp;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002214
Leon Romanovsky9950acf2017-10-29 21:34:35 +02002215 if (udata && ucontext) {
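		/*
		 * Hand the user library opaque mmap keys that it passes
		 * back via mmap() to map the raw queues and the
		 * doorbell/GTS pages into user space.
		 */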
Hariprasad Sa6054df2016-02-05 11:43:28 +05302216 sq_key_mm = kmalloc(sizeof(*sq_key_mm), GFP_KERNEL);
2217 if (!sq_key_mm) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002218 ret = -ENOMEM;
Steve Wise7088a9b2017-09-26 13:11:36 -07002219 goto err_remove_handle;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002220 }
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302221 if (!attrs->srq) {
2222 rq_key_mm = kmalloc(sizeof(*rq_key_mm), GFP_KERNEL);
2223 if (!rq_key_mm) {
2224 ret = -ENOMEM;
2225 goto err_free_sq_key;
2226 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002227 }
Hariprasad Sa6054df2016-02-05 11:43:28 +05302228 sq_db_key_mm = kmalloc(sizeof(*sq_db_key_mm), GFP_KERNEL);
2229 if (!sq_db_key_mm) {
Steve Wisecfdda9d2010-04-21 15:30:06 -07002230 ret = -ENOMEM;
Steve Wise7088a9b2017-09-26 13:11:36 -07002231 goto err_free_rq_key;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002232 }
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302233 if (!attrs->srq) {
2234 rq_db_key_mm =
2235 kmalloc(sizeof(*rq_db_key_mm), GFP_KERNEL);
2236 if (!rq_db_key_mm) {
2237 ret = -ENOMEM;
2238 goto err_free_sq_db_key;
2239 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002240 }
Dan Carpenter8001b712018-08-02 10:56:13 +03002241 memset(&uresp, 0, sizeof(uresp));
Steve Wisec6d7b262010-09-13 11:23:57 -05002242 if (t4_sq_onchip(&qhp->wq.sq)) {
Hariprasad Sa6054df2016-02-05 11:43:28 +05302243 ma_sync_key_mm = kmalloc(sizeof(*ma_sync_key_mm),
2244 GFP_KERNEL);
2245 if (!ma_sync_key_mm) {
Steve Wisec6d7b262010-09-13 11:23:57 -05002246 ret = -ENOMEM;
Steve Wise7088a9b2017-09-26 13:11:36 -07002247 goto err_free_rq_db_key;
Steve Wisec6d7b262010-09-13 11:23:57 -05002248 }
2249 uresp.flags = C4IW_QPF_ONCHIP;
Dan Carpenter8001b712018-08-02 10:56:13 +03002250 }
Potnuri Bharat Tejab9855f42018-08-02 11:33:03 +05302251 if (rhp->rdev.lldi.write_w_imm_support)
2252 uresp.flags |= C4IW_QPF_WRITE_W_IMM;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002253 uresp.qid_mask = rhp->rdev.qpmask;
2254 uresp.sqid = qhp->wq.sq.qid;
2255 uresp.sq_size = qhp->wq.sq.size;
2256 uresp.sq_memsize = qhp->wq.sq.memsize;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302257 if (!attrs->srq) {
2258 uresp.rqid = qhp->wq.rq.qid;
2259 uresp.rq_size = qhp->wq.rq.size;
2260 uresp.rq_memsize = qhp->wq.rq.memsize;
2261 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002262 spin_lock(&ucontext->mmap_lock);
Hariprasad Sa6054df2016-02-05 11:43:28 +05302263 if (ma_sync_key_mm) {
Steve Wisec6d7b262010-09-13 11:23:57 -05002264 uresp.ma_sync_key = ucontext->key;
2265 ucontext->key += PAGE_SIZE;
2266 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002267 uresp.sq_key = ucontext->key;
2268 ucontext->key += PAGE_SIZE;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302269 if (!attrs->srq) {
2270 uresp.rq_key = ucontext->key;
2271 ucontext->key += PAGE_SIZE;
2272 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002273 uresp.sq_db_gts_key = ucontext->key;
2274 ucontext->key += PAGE_SIZE;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302275 if (!attrs->srq) {
2276 uresp.rq_db_gts_key = ucontext->key;
2277 ucontext->key += PAGE_SIZE;
2278 }
Steve Wisecfdda9d2010-04-21 15:30:06 -07002279 spin_unlock(&ucontext->mmap_lock);
Leon Romanovsky34d56892019-05-20 09:54:31 +03002280 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
Steve Wisecfdda9d2010-04-21 15:30:06 -07002281 if (ret)
Steve Wise7088a9b2017-09-26 13:11:36 -07002282 goto err_free_ma_sync_key;
Hariprasad Sa6054df2016-02-05 11:43:28 +05302283 sq_key_mm->key = uresp.sq_key;
2284 sq_key_mm->addr = qhp->wq.sq.phys_addr;
2285 sq_key_mm->len = PAGE_ALIGN(qhp->wq.sq.memsize);
2286 insert_mmap(ucontext, sq_key_mm);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302287 if (!attrs->srq) {
2288 rq_key_mm->key = uresp.rq_key;
2289 rq_key_mm->addr = virt_to_phys(qhp->wq.rq.queue);
2290 rq_key_mm->len = PAGE_ALIGN(qhp->wq.rq.memsize);
2291 insert_mmap(ucontext, rq_key_mm);
2292 }
Hariprasad Sa6054df2016-02-05 11:43:28 +05302293 sq_db_key_mm->key = uresp.sq_db_gts_key;
2294 sq_db_key_mm->addr = (u64)(unsigned long)qhp->wq.sq.bar2_pa;
2295 sq_db_key_mm->len = PAGE_SIZE;
2296 insert_mmap(ucontext, sq_db_key_mm);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302297 if (!attrs->srq) {
2298 rq_db_key_mm->key = uresp.rq_db_gts_key;
2299 rq_db_key_mm->addr =
2300 (u64)(unsigned long)qhp->wq.rq.bar2_pa;
2301 rq_db_key_mm->len = PAGE_SIZE;
2302 insert_mmap(ucontext, rq_db_key_mm);
2303 }
Hariprasad Sa6054df2016-02-05 11:43:28 +05302304 if (ma_sync_key_mm) {
2305 ma_sync_key_mm->key = uresp.ma_sync_key;
2306 ma_sync_key_mm->addr =
2307 (pci_resource_start(rhp->rdev.lldi.pdev, 0) +
2308 PCIE_MA_SYNC_A) & PAGE_MASK;
2309 ma_sync_key_mm->len = PAGE_SIZE;
2310 insert_mmap(ucontext, ma_sync_key_mm);
Steve Wisec6d7b262010-09-13 11:23:57 -05002311 }
Steve Wisec12a67f2016-12-22 07:40:36 -08002312
Steve Wisec12a67f2016-12-22 07:40:36 -08002313 qhp->ucontext = ucontext;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002314 }
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302315 if (!attrs->srq) {
2316 qhp->wq.qp_errp =
2317 &qhp->wq.rq.queue[qhp->wq.rq.size].status.qp_err;
2318 } else {
2319 qhp->wq.qp_errp =
2320 &qhp->wq.sq.queue[qhp->wq.sq.size].status.qp_err;
2321 qhp->wq.srqidxp =
2322 &qhp->wq.sq.queue[qhp->wq.sq.size].status.srqidx;
2323 }
2324
Steve Wisecfdda9d2010-04-21 15:30:06 -07002325 qhp->ibqp.qp_num = qhp->wq.sq.qid;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302326 if (attrs->srq)
2327 qhp->srq = to_c4iw_srq(attrs->srq);
Steve Wise05eb2382014-03-14 21:52:08 +05302328 INIT_LIST_HEAD(&qhp->db_fc_entry);
Bharat Potnuri548ddb12017-09-27 13:05:49 +05302329 pr_debug("sq id %u size %u memsize %zu num_entries %u rq id %u size %u memsize %zu num_entries %u\n",
Joe Perchesa9a42882017-02-09 14:23:51 -08002330 qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
2331 attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
2332 qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
Leon Romanovsky514aee62021-07-23 14:39:50 +03002333 return 0;
Steve Wise7088a9b2017-09-26 13:11:36 -07002334err_free_ma_sync_key:
Hariprasad Sa6054df2016-02-05 11:43:28 +05302335 kfree(ma_sync_key_mm);
Steve Wise7088a9b2017-09-26 13:11:36 -07002336err_free_rq_db_key:
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302337 if (!attrs->srq)
2338 kfree(rq_db_key_mm);
Steve Wise7088a9b2017-09-26 13:11:36 -07002339err_free_sq_db_key:
Hariprasad Sa6054df2016-02-05 11:43:28 +05302340 kfree(sq_db_key_mm);
Steve Wise7088a9b2017-09-26 13:11:36 -07002341err_free_rq_key:
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302342 if (!attrs->srq)
2343 kfree(rq_key_mm);
Steve Wise7088a9b2017-09-26 13:11:36 -07002344err_free_sq_key:
Hariprasad Sa6054df2016-02-05 11:43:28 +05302345 kfree(sq_key_mm);
Steve Wise7088a9b2017-09-26 13:11:36 -07002346err_remove_handle:
Matthew Wilcox2f431292019-02-20 16:20:51 -08002347 xa_erase_irq(&rhp->qps, qhp->wq.sq.qid);
Steve Wise7088a9b2017-09-26 13:11:36 -07002348err_destroy_qp:
Steve Wisecfdda9d2010-04-21 15:30:06 -07002349 destroy_qp(&rhp->rdev, &qhp->wq,
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302350 ucontext ? &ucontext->uctx : &rhp->rdev.uctx, !attrs->srq);
Steve Wise7088a9b2017-09-26 13:11:36 -07002351err_free_wr_wait:
Steve Wise2015f262017-09-26 13:13:17 -07002352 c4iw_put_wr_wait(qhp->wr_waitp);
Leon Romanovsky514aee62021-07-23 14:39:50 +03002353 return ret;
Steve Wisecfdda9d2010-04-21 15:30:06 -07002354}
2355
2356int c4iw_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2357 int attr_mask, struct ib_udata *udata)
2358{
2359 struct c4iw_dev *rhp;
2360 struct c4iw_qp *qhp;
2361 enum c4iw_qp_attr_mask mask = 0;
Leon Romanovsky34d56892019-05-20 09:54:31 +03002362 struct c4iw_qp_attributes attrs = {};
Steve Wisecfdda9d2010-04-21 15:30:06 -07002363
Bharat Potnuri548ddb12017-09-27 13:05:49 +05302364 pr_debug("ib_qp %p\n", ibqp);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002365
Jason Gunthorpe26e990b2020-10-03 20:20:06 -03002366 if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS)
2367 return -EOPNOTSUPP;
2368
Steve Wisecfdda9d2010-04-21 15:30:06 -07002369 /* iwarp does not support the RTR state */
2370 if ((attr_mask & IB_QP_STATE) && (attr->qp_state == IB_QPS_RTR))
2371 attr_mask &= ~IB_QP_STATE;
2372
2373 /* Make sure we still have something left to do */
2374 if (!attr_mask)
2375 return 0;
2376
Steve Wisecfdda9d2010-04-21 15:30:06 -07002377 qhp = to_c4iw_qp(ibqp);
2378 rhp = qhp->rhp;
2379
2380 attrs.next_state = c4iw_convert_state(attr->qp_state);
2381 attrs.enable_rdma_read = (attr->qp_access_flags &
2382 IB_ACCESS_REMOTE_READ) ? 1 : 0;
2383 attrs.enable_rdma_write = (attr->qp_access_flags &
2384 IB_ACCESS_REMOTE_WRITE) ? 1 : 0;
2385 attrs.enable_bind = (attr->qp_access_flags & IB_ACCESS_MW_BIND) ? 1 : 0;
2386
2388 mask |= (attr_mask & IB_QP_STATE) ? C4IW_QP_ATTR_NEXT_STATE : 0;
2389 mask |= (attr_mask & IB_QP_ACCESS_FLAGS) ?
2390 (C4IW_QP_ATTR_ENABLE_RDMA_READ |
2391 C4IW_QP_ATTR_ENABLE_RDMA_WRITE |
2392 C4IW_QP_ATTR_ENABLE_RDMA_BIND) : 0;
2393
Vipul Pandya2c974782012-05-18 15:29:28 +05302394 /*
2395 * Use SQ_PSN and RQ_PSN to pass in IDX_INC values for
2396 * ringing the queue db when we're in DB_FULL mode.
Steve Wisec2f9da92014-04-24 14:32:04 -05002397 * Only allow this on T4 devices.
Vipul Pandya2c974782012-05-18 15:29:28 +05302398 */
2399 attrs.sq_db_inc = attr->sq_psn;
2400 attrs.rq_db_inc = attr->rq_psn;
2401 mask |= (attr_mask & IB_QP_SQ_PSN) ? C4IW_QP_ATTR_SQ_DB : 0;
2402 mask |= (attr_mask & IB_QP_RQ_PSN) ? C4IW_QP_ATTR_RQ_DB : 0;
Hariprasad S963cab52015-09-23 17:19:27 +05302403 if (!is_t4(to_c4iw_qp(ibqp)->rhp->rdev.lldi.adapter_type) &&
Steve Wisec2f9da92014-04-24 14:32:04 -05002404 (mask & (C4IW_QP_ATTR_SQ_DB|C4IW_QP_ATTR_RQ_DB)))
2405 return -EINVAL;
Vipul Pandya2c974782012-05-18 15:29:28 +05302406
Steve Wisecfdda9d2010-04-21 15:30:06 -07002407 return c4iw_modify_qp(rhp, qhp, mask, &attrs, 0);
2408}
2409
2410struct ib_qp *c4iw_get_qp(struct ib_device *dev, int qpn)
2411{
Bharat Potnuri548ddb12017-09-27 13:05:49 +05302412 pr_debug("ib_dev %p qpn 0x%x\n", dev, qpn);
Steve Wisecfdda9d2010-04-21 15:30:06 -07002413 return (struct ib_qp *)get_qhp(to_c4iw_dev(dev), qpn);
2414}
Vipul Pandya67bbc052012-05-18 15:29:33 +05302415
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302416void c4iw_dispatch_srq_limit_reached_event(struct c4iw_srq *srq)
2417{
Bart Van Asschedd708e72018-07-31 08:51:30 -07002418 struct ib_event event = {};
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302419
2420 event.device = &srq->rhp->ibdev;
2421 event.element.srq = &srq->ibsrq;
2422 event.event = IB_EVENT_SRQ_LIMIT_REACHED;
2423 ib_dispatch_event(&event);
2424}
2425
2426int c4iw_modify_srq(struct ib_srq *ib_srq, struct ib_srq_attr *attr,
2427 enum ib_srq_attr_mask srq_attr_mask,
2428 struct ib_udata *udata)
2429{
2430 struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
2431 int ret = 0;
2432
	/*
	 * XXX: a zero attr mask is used by user mode as a software
	 * interrupt to request dispatch of the SRQ limit-reached event.
	 */
2436 if (udata && !srq_attr_mask) {
2437 c4iw_dispatch_srq_limit_reached_event(srq);
2438 goto out;
2439 }
2440
	/* Resizing the SRQ (IB_SRQ_MAX_WR) is not supported yet. */
2442 if (srq_attr_mask & IB_SRQ_MAX_WR) {
2443 ret = -EINVAL;
2444 goto out;
2445 }
2446
2447 if (!udata && (srq_attr_mask & IB_SRQ_LIMIT)) {
2448 srq->armed = true;
2449 srq->srq_limit = attr->srq_limit;
2450 }
2451out:
2452 return ret;
2453}
2454
Vipul Pandya67bbc052012-05-18 15:29:33 +05302455int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
2456 int attr_mask, struct ib_qp_init_attr *init_attr)
2457{
2458 struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
2459
Leon Romanovsky34d56892019-05-20 09:54:31 +03002460 memset(attr, 0, sizeof(*attr));
2461 memset(init_attr, 0, sizeof(*init_attr));
Vipul Pandya67bbc052012-05-18 15:29:33 +05302462 attr->qp_state = to_ib_qp_state(qhp->attr.state);
Kamal Heibe375b9c2021-12-20 17:25:30 +02002463 attr->cur_qp_state = to_ib_qp_state(qhp->attr.state);
Hariprasad Shenai3e5c02c2014-07-21 20:55:14 +05302464 init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
2465 init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
2466 init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
Kamal Heiba3721732021-01-14 21:14:23 +02002467 init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
Hariprasad Shenai3e5c02c2014-07-21 20:55:14 +05302468 init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
2469 init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
Vipul Pandya67bbc052012-05-18 15:29:33 +05302470 return 0;
2471}
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302472
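/*
 * Release the hardware SRQ context with an FW_RI_RES_WR RESET command,
 * then free the queue memory, the RQT range and the qid.
 */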
2473static void free_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2474 struct c4iw_wr_wait *wr_waitp)
2475{
2476 struct c4iw_rdev *rdev = &srq->rhp->rdev;
2477 struct sk_buff *skb = srq->destroy_skb;
2478 struct t4_srq *wq = &srq->wq;
2479 struct fw_ri_res_wr *res_wr;
2480 struct fw_ri_res *res;
2481 int wr_len;
2482
2483 wr_len = sizeof(*res_wr) + sizeof(*res);
2484 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2485
2486 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
2487 memset(res_wr, 0, wr_len);
2488 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2489 FW_RI_RES_WR_NRES_V(1) |
2490 FW_WR_COMPL_F);
2491 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2492 res_wr->cookie = (uintptr_t)wr_waitp;
2493 res = res_wr->res;
2494 res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2495 res->u.srq.op = FW_RI_RES_OP_RESET;
2496 res->u.srq.srqid = cpu_to_be32(srq->idx);
2497 res->u.srq.eqid = cpu_to_be32(wq->qid);
2498
2499 c4iw_init_wr_wait(wr_waitp);
2500 c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, 0, __func__);
2501
2502 dma_free_coherent(&rdev->lldi.pdev->dev,
2503 wq->memsize, wq->queue,
Christoph Hellwig18b01b12018-10-09 16:08:22 +02002504 dma_unmap_addr(wq, mapping));
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302505 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2506 kfree(wq->sw_rq);
2507 c4iw_put_qpid(rdev, wq->qid, uctx);
2508}
2509
2510static int alloc_srq_queue(struct c4iw_srq *srq, struct c4iw_dev_ucontext *uctx,
2511 struct c4iw_wr_wait *wr_waitp)
2512{
2513 struct c4iw_rdev *rdev = &srq->rhp->rdev;
2514 int user = (uctx != &rdev->uctx);
2515 struct t4_srq *wq = &srq->wq;
2516 struct fw_ri_res_wr *res_wr;
2517 struct fw_ri_res *res;
2518 struct sk_buff *skb;
2519 int wr_len;
2520 int eqsize;
2521 int ret = -ENOMEM;
2522
2523 wq->qid = c4iw_get_qpid(rdev, uctx);
2524 if (!wq->qid)
2525 goto err;
2526
2527 if (!user) {
2528 wq->sw_rq = kcalloc(wq->size, sizeof(*wq->sw_rq),
2529 GFP_KERNEL);
2530 if (!wq->sw_rq)
2531 goto err_put_qpid;
2532 wq->pending_wrs = kcalloc(srq->wq.size,
2533 sizeof(*srq->wq.pending_wrs),
2534 GFP_KERNEL);
2535 if (!wq->pending_wrs)
2536 goto err_free_sw_rq;
2537 }
2538
2539 wq->rqt_size = wq->size;
2540 wq->rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rqt_size);
2541 if (!wq->rqt_hwaddr)
2542 goto err_free_pending_wrs;
2543 wq->rqt_abs_idx = (wq->rqt_hwaddr - rdev->lldi.vr->rq.start) >>
2544 T4_RQT_ENTRY_SHIFT;
2545
Luis Chamberlain750afb02019-01-04 09:23:09 +01002546 wq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, wq->memsize,
2547 &wq->dma_addr, GFP_KERNEL);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302548 if (!wq->queue)
2549 goto err_free_rqtpool;
2550
Christoph Hellwig18b01b12018-10-09 16:08:22 +02002551 dma_unmap_addr_set(wq, mapping, wq->dma_addr);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302552
Nathan Chancellor1b571082018-09-24 12:29:03 -07002553 wq->bar2_va = c4iw_bar2_addrs(rdev, wq->qid, CXGB4_BAR2_QTYPE_EGRESS,
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302554 &wq->bar2_qid,
2555 user ? &wq->bar2_pa : NULL);
2556
2557 /*
2558 * User mode must have bar2 access.
2559 */
2561 if (user && !wq->bar2_va) {
2562 pr_warn(MOD "%s: srqid %u not in BAR2 range.\n",
2563 pci_name(rdev->lldi.pdev), wq->qid);
2564 ret = -EINVAL;
2565 goto err_free_queue;
2566 }
2567
2568 /* build fw_ri_res_wr */
2569 wr_len = sizeof(*res_wr) + sizeof(*res);
2570
Jason Gunthorpe8ba0ddd2019-01-20 02:27:13 +01002571 skb = alloc_skb(wr_len, GFP_KERNEL);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302572 if (!skb)
2573 goto err_free_queue;
2574 set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
2575
2576 res_wr = (struct fw_ri_res_wr *)__skb_put(skb, wr_len);
2577 memset(res_wr, 0, wr_len);
2578 res_wr->op_nres = cpu_to_be32(FW_WR_OP_V(FW_RI_RES_WR) |
2579 FW_RI_RES_WR_NRES_V(1) |
2580 FW_WR_COMPL_F);
2581 res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
2582 res_wr->cookie = (uintptr_t)wr_waitp;
2583 res = res_wr->res;
2584 res->u.srq.restype = FW_RI_RES_TYPE_SRQ;
2585 res->u.srq.op = FW_RI_RES_OP_WRITE;
2586
2587 /*
2588 * eqsize is the number of 64B entries plus the status page size.
2589 */
2590 eqsize = wq->size * T4_RQ_NUM_SLOTS +
2591 rdev->hw_queue.t4_eq_status_entries;
2592 res->u.srq.eqid = cpu_to_be32(wq->qid);
2593 res->u.srq.fetchszm_to_iqid =
2594 /* no host cidx updates */
2595 cpu_to_be32(FW_RI_RES_WR_HOSTFCMODE_V(0) |
2596 FW_RI_RES_WR_CPRIO_V(0) | /* don't keep in chip cache */
2597 FW_RI_RES_WR_PCIECHN_V(0) | /* set by uP at ri_init time */
2598 FW_RI_RES_WR_FETCHRO_V(0)); /* relaxed_ordering */
2599 res->u.srq.dcaen_to_eqsize =
2600 cpu_to_be32(FW_RI_RES_WR_DCAEN_V(0) |
2601 FW_RI_RES_WR_DCACPU_V(0) |
2602 FW_RI_RES_WR_FBMIN_V(2) |
2603 FW_RI_RES_WR_FBMAX_V(3) |
2604 FW_RI_RES_WR_CIDXFTHRESHO_V(0) |
2605 FW_RI_RES_WR_CIDXFTHRESH_V(0) |
2606 FW_RI_RES_WR_EQSIZE_V(eqsize));
2607 res->u.srq.eqaddr = cpu_to_be64(wq->dma_addr);
2608 res->u.srq.srqid = cpu_to_be32(srq->idx);
2609 res->u.srq.pdid = cpu_to_be32(srq->pdid);
2610 res->u.srq.hwsrqsize = cpu_to_be32(wq->rqt_size);
2611 res->u.srq.hwsrqaddr = cpu_to_be32(wq->rqt_hwaddr -
2612 rdev->lldi.vr->rq.start);
2613
2614 c4iw_init_wr_wait(wr_waitp);
2615
2616 ret = c4iw_ref_send_wait(rdev, skb, wr_waitp, 0, wq->qid, __func__);
2617 if (ret)
2618 goto err_free_queue;
2619
2620 pr_debug("%s srq %u eqid %u pdid %u queue va %p pa 0x%llx\n"
2621 " bar2_addr %p rqt addr 0x%x size %d\n",
2622 __func__, srq->idx, wq->qid, srq->pdid, wq->queue,
2623 (u64)virt_to_phys(wq->queue), wq->bar2_va,
2624 wq->rqt_hwaddr, wq->rqt_size);
2625
2626 return 0;
2627err_free_queue:
2628 dma_free_coherent(&rdev->lldi.pdev->dev,
2629 wq->memsize, wq->queue,
Christoph Hellwig18b01b12018-10-09 16:08:22 +02002630 dma_unmap_addr(wq, mapping));
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302631err_free_rqtpool:
2632 c4iw_rqtpool_free(rdev, wq->rqt_hwaddr, wq->rqt_size);
2633err_free_pending_wrs:
2634 if (!user)
2635 kfree(wq->pending_wrs);
2636err_free_sw_rq:
2637 if (!user)
2638 kfree(wq->sw_rq);
2639err_put_qpid:
2640 c4iw_put_qpid(rdev, wq->qid, uctx);
2641err:
2642 return ret;
2643}
2644
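/*
 * Copy a WR into the SRQ queue 16 bytes (two u64s) at a time, wrapping
 * at the end of the ring.
 */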
2645void c4iw_copy_wr_to_srq(struct t4_srq *srq, union t4_recv_wr *wqe, u8 len16)
2646{
2647 u64 *src, *dst;
2648
2649 src = (u64 *)wqe;
2650 dst = (u64 *)((u8 *)srq->queue + srq->wq_pidx * T4_EQ_ENTRY_SIZE);
2651 while (len16) {
2652 *dst++ = *src++;
2653 if (dst >= (u64 *)&srq->queue[srq->size])
2654 dst = (u64 *)srq->queue;
2655 *dst++ = *src++;
2656 if (dst >= (u64 *)&srq->queue[srq->size])
2657 dst = (u64 *)srq->queue;
2658 len16--;
2659 }
2660}
2661
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002662int c4iw_create_srq(struct ib_srq *ib_srq, struct ib_srq_init_attr *attrs,
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302663 struct ib_udata *udata)
2664{
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002665 struct ib_pd *pd = ib_srq->pd;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302666 struct c4iw_dev *rhp;
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002667 struct c4iw_srq *srq = to_c4iw_srq(ib_srq);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302668 struct c4iw_pd *php;
2669 struct c4iw_create_srq_resp uresp;
2670 struct c4iw_ucontext *ucontext;
2671 struct c4iw_mm_entry *srq_key_mm, *srq_db_key_mm;
2672 int rqsize;
2673 int ret;
2674 int wr_len;
2675
Jason Gunthorpe652caba2020-10-03 20:20:05 -03002676 if (attrs->srq_type != IB_SRQT_BASIC)
2677 return -EOPNOTSUPP;
2678
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302679 pr_debug("%s ib_pd %p\n", __func__, pd);
2680
2681 php = to_c4iw_pd(pd);
2682 rhp = php->rhp;
2683
2684 if (!rhp->rdev.lldi.vr->srq.size)
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002685 return -EINVAL;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302686 if (attrs->attr.max_wr > rhp->rdev.hw_queue.t4_max_rq_size)
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002687 return -E2BIG;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302688 if (attrs->attr.max_sge > T4_MAX_RECV_SGE)
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002689 return -E2BIG;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302690
2691 /*
2692 * SRQ RQT and RQ must be a power of 2 and at least 16 deep.
2693 */
2694 rqsize = attrs->attr.max_wr + 1;
2695 rqsize = roundup_pow_of_two(max_t(u16, rqsize, 16));
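	/*
	 * For example, max_wr = 100 gives rqsize = 101, rounded up to
	 * 128; any max_wr <= 15 yields the 16-deep minimum.
	 */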
2696
Shamir Rabinovitch89944452019-02-07 18:44:49 +02002697 ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
2698 ibucontext);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302699
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302700 srq->wr_waitp = c4iw_alloc_wr_wait(GFP_KERNEL);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002701 if (!srq->wr_waitp)
2702 return -ENOMEM;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302703
2704 srq->idx = c4iw_alloc_srq_idx(&rhp->rdev);
2705 if (srq->idx < 0) {
2706 ret = -ENOMEM;
2707 goto err_free_wr_wait;
2708 }
2709
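	/*
	 * The skb used to post the destroy (FW_RI_RES_WR reset) is
	 * allocated up front so that SRQ teardown cannot fail on a
	 * memory allocation.
	 */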
2710 wr_len = sizeof(struct fw_ri_res_wr) + sizeof(struct fw_ri_res);
2711 srq->destroy_skb = alloc_skb(wr_len, GFP_KERNEL);
2712 if (!srq->destroy_skb) {
2713 ret = -ENOMEM;
2714 goto err_free_srq_idx;
2715 }
2716
2717 srq->rhp = rhp;
2718 srq->pdid = php->pdid;
2719
2720 srq->wq.size = rqsize;
2721 srq->wq.memsize =
2722 (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
2723 sizeof(*srq->wq.queue);
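	/*
	 * User queues are handed to the process via mmap(), so round
	 * the size up to a whole number of pages in that case.
	 */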
2724 if (ucontext)
2725 srq->wq.memsize = roundup(srq->wq.memsize, PAGE_SIZE);
2726
2727 ret = alloc_srq_queue(srq, ucontext ? &ucontext->uctx :
2728 &rhp->rdev.uctx, srq->wr_waitp);
2729 if (ret)
2730 goto err_free_skb;
2731 attrs->attr.max_wr = rqsize - 1;
2732
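	/*
	 * The SRQ limit event (an async notification when the SRQ
	 * drains below a watermark) is only available on chips newer
	 * than T6, so only advertise it there.
	 */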
2733 if (CHELSIO_CHIP_VERSION(rhp->rdev.lldi.adapter_type) > CHELSIO_T6)
2734 srq->flags = T4_SRQ_LIMIT_SUPPORT;
2735
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302736 if (udata) {
2737 srq_key_mm = kmalloc(sizeof(*srq_key_mm), GFP_KERNEL);
2738 if (!srq_key_mm) {
2739 ret = -ENOMEM;
Potnuri Bharat Teja91724c12019-09-30 13:11:19 +05302740 goto err_free_queue;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302741 }
2742 srq_db_key_mm = kmalloc(sizeof(*srq_db_key_mm), GFP_KERNEL);
2743 if (!srq_db_key_mm) {
2744 ret = -ENOMEM;
2745 goto err_free_srq_key_mm;
2746 }
Dan Carpenter8001b712018-08-02 10:56:13 +03002747 memset(&uresp, 0, sizeof(uresp));
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302748 uresp.flags = srq->flags;
2749 uresp.qid_mask = rhp->rdev.qpmask;
2750 uresp.srqid = srq->wq.qid;
2751 uresp.srq_size = srq->wq.size;
2752 uresp.srq_memsize = srq->wq.memsize;
2753 uresp.rqt_abs_idx = srq->wq.rqt_abs_idx;
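		/*
		 * The "keys" are per-context pseudo mmap offsets: each is
		 * returned to userspace in uresp and registered via
		 * insert_mmap() so the process can mmap() the queue
		 * memory and the BAR2 doorbell/GTS page through the
		 * uverbs device file.
		 */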
2754 spin_lock(&ucontext->mmap_lock);
2755 uresp.srq_key = ucontext->key;
2756 ucontext->key += PAGE_SIZE;
2757 uresp.srq_db_gts_key = ucontext->key;
2758 ucontext->key += PAGE_SIZE;
2759 spin_unlock(&ucontext->mmap_lock);
2760 ret = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
2761 if (ret)
2762 goto err_free_srq_db_key_mm;
2763 srq_key_mm->key = uresp.srq_key;
2764 srq_key_mm->addr = virt_to_phys(srq->wq.queue);
2765 srq_key_mm->len = PAGE_ALIGN(srq->wq.memsize);
2766 insert_mmap(ucontext, srq_key_mm);
2767 srq_db_key_mm->key = uresp.srq_db_gts_key;
2768 srq_db_key_mm->addr = (u64)(unsigned long)srq->wq.bar2_pa;
2769 srq_db_key_mm->len = PAGE_SIZE;
2770 insert_mmap(ucontext, srq_db_key_mm);
2771 }
2772
2773 pr_debug("%s srq qid %u idx %u size %u memsize %lu num_entries %u\n",
2774 __func__, srq->wq.qid, srq->idx, srq->wq.size,
2775 (unsigned long)srq->wq.memsize, attrs->attr.max_wr);
2776
2777 spin_lock_init(&srq->lock);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002778 return 0;
2779
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302780err_free_srq_db_key_mm:
2781 kfree(srq_db_key_mm);
2782err_free_srq_key_mm:
2783 kfree(srq_key_mm);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302784err_free_queue:
2785 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2786 srq->wr_waitp);
2787err_free_skb:
zhong jiang26f91da2018-09-20 17:52:42 +08002788 kfree_skb(srq->destroy_skb);
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302789err_free_srq_idx:
2790 c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2791err_free_wr_wait:
2792 c4iw_put_wr_wait(srq->wr_waitp);
Leon Romanovsky68e326d2019-04-03 16:42:43 +03002793 return ret;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302794}
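/*
 * A rough sketch of what a userspace provider (e.g. libcxgb4) is
 * expected to do with the create response; the uresp field names match
 * c4iw_create_srq_resp above, everything else is illustrative:
 *
 *	queue = mmap(NULL, uresp.srq_memsize, PROT_READ | PROT_WRITE,
 *		     MAP_SHARED, cmd_fd, uresp.srq_key);
 *	db_gts = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED,
 *		      cmd_fd, uresp.srq_db_gts_key);
 */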
2795
Leon Romanovsky119181d2020-09-07 15:09:16 +03002796int c4iw_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302797{
2798 struct c4iw_dev *rhp;
2799 struct c4iw_srq *srq;
2800 struct c4iw_ucontext *ucontext;
2801
2802 srq = to_c4iw_srq(ibsrq);
2803 rhp = srq->rhp;
2804
2805 pr_debug("%s id %d\n", __func__, srq->wq.qid);
Shamir Rabinovitchbdeacab2019-03-31 19:10:06 +03002806 ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
2807 ibucontext);
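	/* Posts the preallocated FW reset WR and frees the queue and RQT. */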
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302808 free_srq_queue(srq, ucontext ? &ucontext->uctx : &rhp->rdev.uctx,
2809 srq->wr_waitp);
2810 c4iw_free_srq_idx(&rhp->rdev, srq->idx);
2811 c4iw_put_wr_wait(srq->wr_waitp);
Leon Romanovsky119181d2020-09-07 15:09:16 +03002812 return 0;
Raju Rangoju6a0b6172018-07-25 21:22:14 +05302813}